0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/module.h>
0011 #include <linux/init.h>
0012 #include <linux/kernel.h>
0013 #include <linux/kmemleak.h>
0014 #include <linux/delay.h>
0015 #include <linux/gfp.h>
0016 #include <linux/io.h>
0017 #include <linux/atomic.h>
0018 #include <asm/debug.h>
0019 #include <asm/qdio.h>
0020 #include <asm/ipl.h>
0021
0022 #include "cio.h"
0023 #include "css.h"
0024 #include "device.h"
0025 #include "qdio.h"
0026 #include "qdio_debug.h"
0027
0028 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
0029 "Jan Glauber <jang@linux.vnet.ibm.com>");
0030 MODULE_DESCRIPTION("QDIO base support");
0031 MODULE_LICENSE("GPL");
0032
/*
 * do_siga_sync - issue a SIGA instruction to sync queue state with the adapter
 * @schid: subchannel id (or the subchannel token when the QEBSM flag is set
 *	   in @fc -- see callers)
 * @out_mask: mask of output queues to synchronize
 * @in_mask: mask of input queues to synchronize
 * @fc: function code for the SIGA instruction
 *
 * Returns the instruction's condition code (0 on success).
 */
static inline int do_siga_sync(unsigned long schid,
			       unsigned long out_mask, unsigned long in_mask,
			       unsigned int fc)
{
	int cc;

	/* SIGA takes its operands in gprs 0-3; extract cc via ipm/srl */
	asm volatile(
		" lgr 0,%[fc]\n"
		" lgr 1,%[schid]\n"
		" lgr 2,%[out]\n"
		" lgr 3,%[in]\n"
		" siga 0\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid),
		  [out] "d" (out_mask), [in] "d" (in_mask)
		: "cc", "0", "1", "2", "3");
	return cc;
}
0053
/*
 * do_siga_input - issue SIGA to signal the adapter for inbound processing
 * @schid: subchannel id (or subchannel token with QEBSM)
 * @mask: mask of input queues to process
 * @fc: function code for the SIGA instruction
 *
 * Returns the instruction's condition code (0 on success).
 */
static inline int do_siga_input(unsigned long schid, unsigned long mask,
				unsigned long fc)
{
	int cc;

	asm volatile(
		" lgr 0,%[fc]\n"
		" lgr 1,%[schid]\n"
		" lgr 2,%[mask]\n"
		" siga 0\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
		: "cc", "0", "1", "2");
	return cc;
}
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
/*
 * do_siga_output - issue SIGA to signal the adapter for outbound processing
 * @schid: subchannel id (or subchannel token with QEBSM)
 * @mask: mask of output queues to process
 * @bb: on return, the busy bit reported by the instruction
 * @fc: function code for the SIGA instruction
 * @aob: asynchronous operation block address (0 when not used)
 *
 * Returns the instruction's condition code (0 on success).  The busy
 * indication comes back in gpr 0 and is copied out through @bb.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned long fc,
				 unsigned long aob)
{
	int cc;

	asm volatile(
		" lgr 0,%[fc]\n"
		" lgr 1,%[schid]\n"
		" lgr 2,%[mask]\n"
		" lgr 3,%[aob]\n"
		" siga 0\n"
		" lgr %[fc],0\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=&d" (cc), [fc] "+&d" (fc)
		: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
		: "cc", "0", "1", "2", "3");
	/* busy bit is returned in bit 32 of gpr 0 (now held in fc) */
	*bb = fc >> 31;
	return cc;
}
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
/**
 * qdio_do_eqbs - extract buffer states for QEBSM devices
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	/* output queues share the buffer-number space after the input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed, retry */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
/**
 * qdio_do_sqbs - set buffer states for QEBSM devices
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retry itself if a buffer state could not be changed.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	qperf_inc(q, sqbs);

	/* output queues share the buffer-number space after the input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done; on success no buffers should remain unprocessed */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed, retry the remainder */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
0200
0201
0202
0203
0204
/*
 * get_buf_states - return the state shared by a run of consecutive buffers
 * @q: queue to examine
 * @bufnr: first buffer number to examine
 * @state: on return, the state common to the examined buffers
 * @count: maximum number of buffers to examine
 * @auto_ack: automatically acknowledge buffers (QEBSM only)
 *
 * Returns the number of consecutive buffers (starting at @bufnr) that share
 * the same SLSB state.  Stops early when a buffer is still owned by the
 * control unit.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}
0234
/* Single-buffer convenience wrapper around get_buf_states(). */
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	const unsigned int one = 1;

	return get_buf_states(q, bufnr, state, one, auto_ack);
}
0240
0241
/*
 * set_buf_states - move @count consecutive buffers into @state
 * @q: queue to manipulate
 * @bufnr: first buffer number to change
 * @state: new SLSB state for the buffers
 * @count: how many buffers to change
 *
 * Returns the number of changed buffers.  For QEBSM devices the state change
 * is delegated to the SQBS instruction; otherwise the SLSB entries are
 * written directly, bracketed by memory barriers since the SLSB is shared
 * with the adapter.
 */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	/* Ensure that all preceding changes to the SBALs are visible: */
	mb();

	for (i = 0; i < count; i++) {
		WRITE_ONCE(q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}

	/* Make our SLSB changes visible: */
	mb();

	return count;
}
0263
/* Single-buffer convenience wrapper around set_buf_states(). */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	const int one = 1;

	return set_buf_states(q, bufnr, state, one);
}
0269
0270
/* set all buffers of all queues to the initial (driver-owned) state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
0283
/*
 * qdio_siga_sync - synchronize queue state between host and adapter
 * @q: queue whose irq the sync is issued against
 * @output: mask of output queues to sync
 * @input: mask of input queues to sync
 *
 * Returns 0 on success, -EIO if SIGA-sync reported a non-zero cc.
 */
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	/* QEBSM devices are addressed via subchannel token instead of schid */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
0304
0305 static inline int qdio_sync_input_queue(struct qdio_q *q)
0306 {
0307 return qdio_siga_sync(q, 0, q->mask);
0308 }
0309
0310 static inline int qdio_sync_output_queue(struct qdio_q *q)
0311 {
0312 return qdio_siga_sync(q, q->mask, 0);
0313 }
0314
0315 static inline int qdio_siga_sync_q(struct qdio_q *q)
0316 {
0317 if (q->is_input_q)
0318 return qdio_sync_input_queue(q);
0319 else
0320 return qdio_sync_output_queue(q);
0321 }
0322
/*
 * qdio_siga_output - signal the adapter to process outbound buffers
 * @q: output queue to signal
 * @count: number of buffers being handed over (selects WRITEM for >1)
 * @busy_bit: on return, the busy indication from the instruction
 * @aob: physical address of the asynchronous operation block, or 0
 *
 * Retries while the adapter reports busy, for at most
 * QDIO_BUSY_BIT_PATIENCE (measured via the TOD clock).  Returns the SIGA
 * condition code.
 */
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	/* pick the write variant; multi-write/queue-write only for HiperSockets
	 * unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition: retry within the patience window */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
0363
/*
 * qdio_siga_input - signal the adapter to process inbound buffers
 * @q: input queue to signal
 *
 * Returns 0 on success, -EIO if SIGA-read reported a non-zero cc.
 */
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	/* QEBSM devices are addressed via subchannel token instead of schid */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
0383
0384 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
0385 unsigned char *state)
0386 {
0387 if (qdio_need_siga_sync(q->irq_ptr))
0388 qdio_siga_sync_q(q);
0389 return get_buf_state(q, bufnr, state, 0);
0390 }
0391
/* Reset the current batch of ACKed input buffers back to the initial
 * driver-owned state and clear the batch bookkeeping. */
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}
0404
0405 static inline void account_sbals(struct qdio_q *q, unsigned int count)
0406 {
0407 q->q_stats.nr_sbal_total += count;
0408 q->q_stats.nr_sbals[ilog2(count)]++;
0409 }
0410
/* Log details about buffers that entered the error state. */
static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	/* special handling for no target buffer empty (HiperSockets only);
	 * sflags 0x10 presumably means "target full" -- matches the counter */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}
0429
/* Record newly completed input buffers in the queue's ACK batch. */
static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	/* ACK the newest buffer of the run, unless the hardware already
	 * acknowledged it for us (QEBSM auto-ack) */
	if (!auto_ack)
		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);

	/* extend (or begin) the batch of buffers awaiting qdio_stop_polling */
	if (!q->u.in.batch_count)
		q->u.in.batch_start = start;
	q->u.in.batch_count += count;
}
0441
/*
 * get_inbound_buffer_frontier - scan the input queue for completed buffers
 * @q: queue to scan
 * @start: first buffer to examine
 * @error: set to QDIO_ERROR_SLSB_STATE when error buffers are found
 *
 * Returns the number of consecutive buffers (from @start) that the driver
 * can hand to the upper layer, or 0 if there is nothing to do.
 */
static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
				       unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	/* don't scan more buffers than are outstanding on the queue */
	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	if (qdio_need_siga_sync(q->irq_ptr))
		qdio_sync_input_queue(q);

	count = get_buf_states(q, start, &state, count, 1);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		/* the adapter filled these buffers with data */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
		/* buffer still owned by the adapter, nothing to do yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* these states should never show up at the frontier */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}
0500
/**
 * qdio_inspect_input_queue - inspect an input queue for completed work
 * @cdev: associated ccw_device
 * @nr: input queue number to inspect
 * @bufnr: on success, the first completed buffer's number
 * @error: on success, error information for the completed buffers
 *
 * Returns the number of completed buffers (advancing the queue's
 * first_to_check cursor past them), 0 when there is no work, or -ENODEV
 * when no qdio data is attached to the device.
 */
int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr,
			     unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq = cdev->private->qdio_data;
	unsigned int start;
	struct qdio_q *q;
	int count;

	if (!irq)
		return -ENODEV;

	q = irq->input_qs[nr];
	start = q->first_to_check;
	*error = 0;

	count = get_inbound_buffer_frontier(q, start, error);
	if (count == 0)
		return 0;

	*bufnr = start;
	q->first_to_check = add_buf(start, count);
	return count;
}
EXPORT_SYMBOL_GPL(qdio_inspect_input_queue);
0525
0526 static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
0527 {
0528 unsigned char state = 0;
0529
0530 if (!atomic_read(&q->nr_buf_used))
0531 return 1;
0532
0533 if (qdio_need_siga_sync(q->irq_ptr))
0534 qdio_sync_input_queue(q);
0535 get_buf_state(q, start, &state, 0);
0536
0537 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
0538
0539 return 0;
0540
0541 return 1;
0542 }
0543
/*
 * get_outbound_buffer_frontier - scan the output queue for finished buffers
 * @q: queue to scan
 * @start: first buffer to examine
 * @error: set to QDIO_ERROR_SLSB_PENDING / QDIO_ERROR_SLSB_STATE as found
 *
 * Returns the number of consecutive buffers (from @start) whose
 * transmission has completed, or 0 if there is nothing to reap yet.
 */
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
					unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	/* don't scan more buffers than are outstanding on the queue */
	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	if (qdio_need_siga_sync(q->irq_ptr))
		qdio_sync_output_queue(q);

	count = get_buf_states(q, start, &state, count, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		/* completion is pending, report it but reap the buffers */
		*error = QDIO_ERROR_SLSB_PENDING;
		fallthrough;
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			      "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
			      q->nr, count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
		/* this state should never show up at the frontier */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}
0604
/**
 * qdio_inspect_output_queue - inspect an output queue for finished work
 * @cdev: associated ccw_device
 * @nr: output queue number to inspect
 * @bufnr: on success, the first finished buffer's number
 * @error: on success, error information for the finished buffers
 *
 * Returns the number of finished buffers (advancing the queue's
 * first_to_check cursor past them), 0 when there is no work, or -ENODEV
 * when no qdio data is attached to the device.
 */
int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
			      unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq = cdev->private->qdio_data;
	unsigned int start;
	struct qdio_q *q;
	int count;

	if (!irq)
		return -ENODEV;

	q = irq->output_qs[nr];
	start = q->first_to_check;
	*error = 0;

	count = get_outbound_buffer_frontier(q, start, error);
	if (count == 0)
		return 0;

	*bufnr = start;
	q->first_to_check = add_buf(start, count);
	return count;
}
EXPORT_SYMBOL_GPL(qdio_inspect_output_queue);
0629
/*
 * qdio_kick_outbound_q - kick the adapter to process the output queue
 * @q: queue to kick
 * @count: number of buffers being handed over
 * @aob: physical address of the asynchronous operation block, or 0
 *
 * Maps the SIGA condition codes to errnos: 0 on success, -EBUSY when the
 * busy bit persisted through QDIO_BUSY_BIT_RETRIES retries, -ENOBUFS for
 * cc2 without busy bit, -EIO for cc1/cc3.
 */
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
				unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	/* nothing to do if the device does not require SIGA-write */
	if (!qdio_need_siga_out(q->irq_ptr))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, count, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			/* back off and retry before giving up */
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
0672
/* Change the irq's state; the barrier makes the new state visible to
 * other CPUs (e.g. waiters on cdev->private->wait_q) before they proceed. */
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}
0681
0682 static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
0683 {
0684 if (irb->esw.esw0.erw.cons) {
0685 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
0686 DBF_ERROR_HEX(irb, 64);
0687 DBF_ERROR_HEX(irb->ecw, 64);
0688 }
0689 }
0690
0691
0692 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
0693 {
0694 if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
0695 return;
0696
0697 qdio_deliver_irq(irq_ptr);
0698 irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
0699 }
0700
/* Handle a channel/device check that arrived while the irq was active:
 * notify the upper layer, stop the irq and log LGR information. */
static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	unsigned int first_to_check = 0;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	/* zfcp wants this: report the first input queue's scan position */
	if (irq_ptr->nr_input_qs)
		first_to_check = irq_ptr->input_qs[0]->first_to_check;

	irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0,
			       first_to_check, 0, irq_ptr->int_parm);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	/*
	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}
0724
/* Evaluate the interrupt that answers the establish ccw: clean
 * channel-end + device-end means the subchannel is established, anything
 * else is an error. */
static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
				      int dstat)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
0744
0745
/* qdio interrupt handler -- dispatches on the irq state machine */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	/* interrupts for devices without qdio data are not ours to handle */
	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* answer to the establish ccw */
		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			/* data interrupt; no waiter to wake */
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(irq_ptr, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}
0796
0797
0798
0799
0800
0801
0802
0803
0804
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The data can be queried at any time, even
 * while qdio is not set up on the device.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
0818
/*
 * qdio_cancel_ccw - cancel the current I/O on the subchannel
 * @irq: irq to shut down
 * @how: QDIO_FLAG_CLEANUP_USING_CLEAR selects clear, otherwise halt
 *
 * Issues clear/halt under the ccwdev lock, then waits (interruptibly,
 * up to 10s) for the resulting interrupt to move the irq to INACTIVE or
 * ERR.  Returns 0 on success, -EINTR/-ETIME on wait failure, or the
 * clear/halt error code.
 */
static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
{
	struct ccw_device *cdev = irq->cdev;
	long timeout;
	int rc;

	spin_lock_irq(get_ccwdev_lock(cdev));
	qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		return rc;
	}

	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
						   irq->state == QDIO_IRQ_STATE_INACTIVE ||
						   irq->state == QDIO_IRQ_STATE_ERR,
						   10 * HZ);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	return rc;
}
0848
0849
0850
0851
0852
0853
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * Returns 0 on success or a negative error code.
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	qdio_shutdown_debug_entries(irq_ptr);

	rc = qdio_cancel_ccw(irq_ptr, how);
	qdio_shutdown_thinint(irq_ptr);
	qdio_shutdown_irq(irq_ptr);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
0895
0896
0897
0898
0899
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 *
 * Returns 0 on success, -ENODEV when no qdio data is attached.
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	/* detach from the device before freeing anything */
	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_free_queues(irq_ptr);
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	kfree(irq_ptr->ccw);
	free_page((unsigned long) irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
0925
0926
0927
0928
0929
0930
0931
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input queues
 * @no_output_qs: allocate this number of Output queues
 *
 * Returns 0 on success, -EINVAL for out-of-range queue counts, -ENOMEM on
 * allocation failure.  On success the qdio data is attached to the device.
 */
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->ccw)
		goto err_ccw;

	/* kmemleak doesn't scan the page-allocated irq_ptr: */
	kmemleak_not_leak(irq_ptr->ccw);

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp recovery,
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	kfree(irq_ptr->ccw);
err_ccw:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
1000
/* record the caller-supplied initialization data in the debug feature */
static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}
1016
1017
1018
1019
1020
1021
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 *
 * Validates @init_data, sets up the irq, starts the establish ccw and
 * waits for the answering interrupt.  Returns 0 on success or a negative
 * error code; on error everything set up here is torn down again.
 */
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	struct ciw *ciw;
	long timeout;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	/* an input handler is mandatory, an output handler only when
	 * output queues are requested */
	if (!init_data->input_handler)
		return -EINVAL;

	if (init_data->no_output_qs && !init_data->output_handler)
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	if (!init_data->irq_poll)
		return -EINVAL;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO EQ", schid.sch_no);
		return -EIO;
	}

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc)
		goto err_thinint;

	/* establish q: build the ccw from the equeue ciw */
	irq_ptr->ccw->cmd_code = ciw->cmd;
	irq_ptr->ccw->flags = CCW_FLAG_SLI;
	irq_ptr->ccw->count = ciw->count;
	irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto err_ccw_start;
	}

	/* wait for the establish interrupt (or an error) */
	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
						   irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
						   irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
	if (timeout <= 0) {
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
		goto err_ccw_timeout;
	}

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		rc = -EIO;
		goto err_ccw_error;
	}

	qdio_setup_ssqd_info(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;

err_ccw_timeout:
	qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
err_ccw_error:
err_ccw_start:
	qdio_shutdown_thinint(irq_ptr);
err_thinint:
	qdio_shutdown_irq(irq_ptr);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_establish);
1121
1122
1123
1124
1125
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 *
 * Starts the activate ccw and marks the irq active.  Returns 0 on
 * success or a negative error code.
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	struct ciw *ciw;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO AQ", schid.sch_no);
		return -EIO;
	}

	mutex_lock(&irq_ptr->setup_mutex);
	/* the subchannel must be established before it can be activated */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw->cmd_code = ciw->cmd;
	irq_ptr->ccw->flags = CCW_FLAG_SLI;
	irq_ptr->ccw->count = ciw->count;
	irq_ptr->ccw->cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
1185
1186
1187
1188
1189
1190
1191
/**
 * handle_inbound - return buffers of an input queue to the adapter
 * @q: queue containing the buffers
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Returns 0 on success, or the qdio_siga_input() result when the device
 * requires a SIGA-read.
 */
static int handle_inbound(struct qdio_q *q, int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust the ACK batch so
	 * it no longer covers them: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			q->u.in.batch_count);
	if (overlap > 0) {
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (qdio_need_siga_in(q->irq_ptr))
		return qdio_siga_input(q);

	return 0;
}
1214
1215
1216
1217
1218
1219
1220
1221
/**
 * qdio_add_bufs_to_input_queue - process buffers on an Input Queue
 * @cdev: associated ccw_device for the qdio subchannel
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 *
 * Returns 0 on success, or a negative error code (-EINVAL for bad
 * buffer/count, -ENODEV without qdio data, -EIO when the irq is not
 * active).
 */
int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, unsigned int q_nr,
				 unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;

	return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
}
EXPORT_SYMBOL_GPL(qdio_add_bufs_to_input_queue);
1243
1244
1245
1246
1247
1248
1249
1250
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 * @aob: asynchronous operation block, or NULL
 *
 * Hands the buffers over to the adapter and, depending on device
 * capabilities, issues the required SIGA.  Returns 0 or a negative
 * error code from qdio_kick_outbound_q()/qdio_sync_output_queue().
 */
static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
			   struct qaob *aob)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;

		WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (qdio_need_siga_sync(q->irq_ptr)) {
		rc = qdio_sync_output_queue(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	return rc;
}
1283
1284
1285
1286
1287
1288
1289
1290
1291
/**
 * qdio_add_bufs_to_output_queue - process buffers on an Output Queue
 * @cdev: associated ccw_device for the qdio subchannel
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 * @aob: asynchronous operation block
 *
 * Returns 0 on success, or a negative error code (-EINVAL for bad
 * buffer/count, -ENODEV without qdio data, -EIO when the irq is not
 * active).
 */
int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, unsigned int q_nr,
				  unsigned int bufnr, unsigned int count,
				  struct qaob *aob)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addo b:%02x c:%02x", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;

	return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
}
EXPORT_SYMBOL_GPL(qdio_add_bufs_to_output_queue);
1314
1315
1316
1317
1318
1319
1320
1321
1322
/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 *   -ENODEV - no qdio data attached
 */
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	/* clear the ACK batches before re-enabling interrupts */
	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	/* re-disable; report 1 only if we were the ones who disabled */
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368 int qdio_stop_irq(struct ccw_device *cdev)
1369 {
1370 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1371
1372 if (!irq_ptr)
1373 return -ENODEV;
1374
1375 if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
1376 return 0;
1377 else
1378 return 1;
1379 }
1380 EXPORT_SYMBOL(qdio_stop_irq);
1381
/* module init: set up debug feature, setup cache and thinint support,
 * unwinding in reverse order on failure */
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;
	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}
1403
/* module exit: tear down in reverse order of init_QDIO() */
static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}
1410
1411 module_init(init_QDIO);
1412 module_exit(exit_QDIO);