0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define KMSG_COMPONENT "cio"
0014 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
0015
0016 #include <linux/memblock.h>
0017 #include <linux/device.h>
0018 #include <linux/init.h>
0019 #include <linux/list.h>
0020 #include <linux/export.h>
0021 #include <linux/moduleparam.h>
0022 #include <linux/slab.h>
0023 #include <linux/timex.h> /* get_tod_clock() */
0024
0025 #include <asm/ccwdev.h>
0026 #include <asm/cio.h>
0027 #include <asm/cmb.h>
0028 #include <asm/div64.h>
0029
0030 #include "cio.h"
0031 #include "css.h"
0032 #include "device.h"
0033 #include "ioasm.h"
0034 #include "chsc.h"
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044 #define ARGSTRING "s390cmf"
0045
0046
/*
 * Indices understood by cmf_read(). The non-negative values correspond
 * to fields of a channel measurement block; avg_utilization is a
 * synthetic value computed from three of them (see __cmb_utilization()).
 */
enum cmb_index {
	avg_utilization = -1,

	cmb_ssch_rsch_count = 0,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* the two fields below exist only in the extended format (struct cmbe) */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
/*
 * Available measurement block formats. CMF_AUTODETECT is resolved to
 * CMF_BASIC or CMF_EXTENDED at init time (see init_cmf()).
 */
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};
0078
0079
0080
0081
0082
0083
0084
0085
/*
 * "format" module parameter: selects the measurement block format;
 * defaults to autodetection, read-only through sysfs (0444).
 */
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
/*
 * struct cmb_operations - backend functions, chosen by cmb_format
 *
 * @alloc:	allocate memory for a channel measurement block
 * @free:	free memory allocated for a channel measurement block
 * @set:	enable/disable measurement (2 = on, 0 = off; see callers)
 * @read:	read a single measurement value (enum cmb_index)
 * @readall:	read the whole block into a struct cmbdata
 * @reset:	clear the counters and restart the measurement interval
 * @attr_group:	sysfs attributes exposed while measurement is enabled
 */
struct cmb_operations {
	int (*alloc) (struct ccw_device *);
	void (*free) (struct ccw_device *);
	int (*set) (struct ccw_device *, u32);
	u64 (*read) (struct ccw_device *, int);
	int (*readall)(struct ccw_device *, struct cmbdata *);
	void (*reset) (struct ccw_device *);

	struct attribute_group *attr_group;
};
/* set once in init_cmf(), never changed afterwards */
static struct cmb_operations *cmbops;
0116
/* Per-device measurement bookkeeping, hung off cdev->private->cmb. */
struct cmb_data {
	void *hw_block;		/* measurement block updated by the hardware */
	void *last_block;	/* most recent snapshot of hw_block */
	int size;		/* size of hw_block and last_block in bytes */
	unsigned long long last_update;	/* TOD clock of the last snapshot; 0 = none yet */
};
0123
0124
0125
0126
0127
0128
0129 static inline u64 time_to_nsec(u32 value)
0130 {
0131 return ((u64)value) * 128000ull;
0132 }
0133
0134
0135
0136
0137
0138
0139
0140 static inline u64 time_to_avg_nsec(u32 value, u32 count)
0141 {
0142 u64 ret;
0143
0144
0145 if (count == 0)
0146 return 0;
0147
0148
0149 ret = time_to_nsec(value);
0150 do_div(ret, count);
0151
0152 return ret;
0153 }
0154
#define CMF_OFF 0
#define CMF_ON 2

/*
 * Activate or deactivate the channel monitoring facility with the SCHM
 * instruction. @area is the measurement area origin for the basic
 * format; the extended format activates with a NULL area and programs
 * per-device addresses through the schib (see set_cmbe()).
 */
static inline void cmf_activate(void *area, unsigned int onoff)
{
	/* SCHM takes its operands in general registers 1 and 2 */
	asm volatile(
		" lgr 1,%[r1]\n"
		" lgr 2,%[mbo]\n"
		" schm\n"
		:
		: [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
		: "1", "2");
}
0175
0176 static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
0177 unsigned long address)
0178 {
0179 struct subchannel *sch = to_subchannel(cdev->dev.parent);
0180 int ret;
0181
0182 sch->config.mme = mme;
0183 sch->config.mbfc = mbfc;
0184
0185 if (mbfc)
0186 sch->config.mba = address;
0187 else
0188 sch->config.mbi = address;
0189
0190 ret = cio_commit_config(sch);
0191 if (!mme && ret == -ENODEV) {
0192
0193
0194
0195
0196 ret = 0;
0197 }
0198 return ret;
0199 }
0200
/* Context handed from set_schib_wait() to retry_set_schib(). */
struct set_schib_struct {
	u32 mme;		/* measurement-mode enable value */
	int mbfc;		/* measurement-block format control */
	unsigned long address;	/* block address (mbfc) or block index */
	wait_queue_head_t wait;	/* woken once ret != CMF_PENDING */
	int ret;		/* CMF_PENDING until the retry completed */
};

#define CMF_PENDING 1
#define SET_SCHIB_TIMEOUT (10 * HZ)
0211
/*
 * Set the measurement parameters, sleeping until a busy subchannel
 * becomes idle if necessary.
 *
 * If set_schib() reports -EBUSY, the device state is switched to
 * DEV_STATE_CMFCHANGE and we wait (interruptible, bounded by
 * SET_SCHIB_TIMEOUT) for retry_set_schib() to repeat the operation and
 * wake us. Returns -ENODEV if measurement is not set up, -ETIME on
 * timeout, -ERESTARTSYS if interrupted, or the set_schib() result.
 */
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct set_data;
	int ret = -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb)
		goto out;

	ret = set_schib(cdev, mme, mbfc, address);
	if (ret != -EBUSY)
		goto out;

	/* if the device is not online, no retry will ever happen */
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out;

	init_waitqueue_head(&set_data.wait);
	set_data.mme = mme;
	set_data.mbfc = mbfc;
	set_data.address = address;
	set_data.ret = CMF_PENDING;

	cdev->private->state = DEV_STATE_CMFCHANGE;
	cdev->private->cmb_wait = &set_data;
	spin_unlock_irq(cdev->ccwlock);

	ret = wait_event_interruptible_timeout(set_data.wait,
					       set_data.ret != CMF_PENDING,
					       SET_SCHIB_TIMEOUT);
	spin_lock_irq(cdev->ccwlock);
	if (ret <= 0) {
		/* timed out (0) or signal (<0) before the retry ran */
		if (set_data.ret == CMF_PENDING) {
			set_data.ret = (ret == 0) ? -ETIME : ret;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
	}
	cdev->private->cmb_wait = NULL;
	ret = set_data.ret;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
0257
/*
 * Repeat the schib update requested by a sleeping set_schib_wait() and
 * wake the sleeper. Called while the device is in DEV_STATE_CMFCHANGE;
 * a NULL cmb_wait means no update is pending.
 */
void retry_set_schib(struct ccw_device *cdev)
{
	struct set_schib_struct *set_data = cdev->private->cmb_wait;

	if (!set_data)
		return;

	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
				  set_data->address);
	wake_up(&set_data->wait);
}
0269
/*
 * Snapshot the hardware measurement block into last_block and timestamp
 * the copy.
 *
 * Returns -ENODEV if the schib cannot be refreshed, -EBUSY while a
 * started, unsuspended function is active on the subchannel (the block
 * may then be updated concurrently), 0 on success.
 */
static int cmf_copy_block(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct cmb_data *cmb_data;
	void *hw_block;

	if (cio_update_schib(sch))
		return -ENODEV;

	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
		/* don't copy if a start function is in progress */
		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
		    (scsw_actl(&sch->schib.scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
			return -EBUSY;
	}
	cmb_data = cdev->private->cmb;
	hw_block = cmb_data->hw_block;
	memcpy(cmb_data->last_block, hw_block, cmb_data->size);
	cmb_data->last_update = get_tod_clock();
	return 0;
}
0293
/* Context handed from cmf_cmb_copy_wait() to cmf_retry_copy_block(). */
struct copy_block_struct {
	wait_queue_head_t wait;	/* woken once ret != CMF_PENDING */
	int ret;		/* CMF_PENDING until the copy completed */
};
0298
0299 static int cmf_cmb_copy_wait(struct ccw_device *cdev)
0300 {
0301 struct copy_block_struct copy_block;
0302 int ret = -ENODEV;
0303
0304 spin_lock_irq(cdev->ccwlock);
0305 if (!cdev->private->cmb)
0306 goto out;
0307
0308 ret = cmf_copy_block(cdev);
0309 if (ret != -EBUSY)
0310 goto out;
0311
0312 if (cdev->private->state != DEV_STATE_ONLINE)
0313 goto out;
0314
0315 init_waitqueue_head(©_block.wait);
0316 copy_block.ret = CMF_PENDING;
0317
0318 cdev->private->state = DEV_STATE_CMFUPDATE;
0319 cdev->private->cmb_wait = ©_block;
0320 spin_unlock_irq(cdev->ccwlock);
0321
0322 ret = wait_event_interruptible(copy_block.wait,
0323 copy_block.ret != CMF_PENDING);
0324 spin_lock_irq(cdev->ccwlock);
0325 if (ret) {
0326 if (copy_block.ret == CMF_PENDING) {
0327 copy_block.ret = -ERESTARTSYS;
0328 if (cdev->private->state == DEV_STATE_CMFUPDATE)
0329 cdev->private->state = DEV_STATE_ONLINE;
0330 }
0331 }
0332 cdev->private->cmb_wait = NULL;
0333 ret = copy_block.ret;
0334 out:
0335 spin_unlock_irq(cdev->ccwlock);
0336 return ret;
0337 }
0338
0339 void cmf_retry_copy_block(struct ccw_device *cdev)
0340 {
0341 struct copy_block_struct *copy_block = cdev->private->cmb_wait;
0342
0343 if (!copy_block)
0344 return;
0345
0346 copy_block->ret = cmf_copy_block(cdev);
0347 wake_up(©_block->wait);
0348 }
0349
0350 static void cmf_generic_reset(struct ccw_device *cdev)
0351 {
0352 struct cmb_data *cmb_data;
0353
0354 spin_lock_irq(cdev->ccwlock);
0355 cmb_data = cdev->private->cmb;
0356 if (cmb_data) {
0357 memset(cmb_data->last_block, 0, cmb_data->size);
0358
0359
0360
0361
0362 memset(cmb_data->hw_block, 0, cmb_data->size);
0363 cmb_data->last_update = 0;
0364 }
0365 cdev->private->cmb_start_time = get_tod_clock();
0366 spin_unlock_irq(cdev->ccwlock);
0367 }
0368
0369
0370
0371
0372
0373
0374
0375
0376
/*
 * struct cmb_area - global state for the basic measurement format
 *
 * @mem:	contiguous array of measurement blocks (basic mode only)
 * @list:	all device privates currently being measured, kept sorted
 *		by their hw_block address (see alloc_cmb_single())
 * @num_channels: capacity of @mem, settable via "maxchannels"
 * @lock:	protects @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,
};

/*
 * "maxchannels" module parameter: number of devices measurable at the
 * same time in basic mode; sizes the contiguous cmb_area.mem array.
 */
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
/*
 * struct cmb - basic channel measurement block
 *
 * Written by the channel subsystem; time fields are in the raw units
 * converted by time_to_nsec().
 */
struct cmb {
	u16 ssch_rsch_count;		/* start/resume subchannel count */
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
0432
0433
0434
0435
0436
/*
 * Claim the first free slot of the cmb_area.mem array for @cdev and
 * link the device into the measurement list.
 *
 * cmb_area.list stays sorted by the hw_block pointers of its entries,
 * so walking it in order and counting matching slots finds the first
 * gap in the array. Caller holds cmb_area.lock.
 */
static int alloc_cmb_single(struct ccw_device *cdev,
			    struct cmb_data *cmb_data)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		/* device is already on the measurement list */
		ret = -EBUSY;
		goto out;
	}

	/* find the first unused cmb in cmb_area.mem */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data;
		data = node->cmb;
		/* a gap before this entry: cmb is free */
		if ((struct cmb*)data->hw_block > cmb)
			break;
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		ret = -ENOMEM;
		goto out;
	}

	/* insert in front of node, preserving the sort order */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cmb_data->hw_block = cmb;
	cdev->private->cmb = cmb_data;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
0477
/*
 * Allocate the per-device cmb_data and, on first use, the global
 * measurement area, then claim a slot via alloc_cmb_single().
 *
 * The global area must come from __get_free_pages (GFP_DMA, physically
 * contiguous). The spinlock is dropped around that allocation, so a
 * concurrent winner is detected afterwards and the duplicate is freed.
 */
static int alloc_cmb(struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;
	struct cmb_data *cmb_data;

	/* allocate the private bookkeeping first (may sleep) */
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data)
		return -ENOMEM;

	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
	if (!cmb_data->last_block) {
		kfree(cmb_data);
		return -ENOMEM;
	}
	cmb_data->size = sizeof(struct cmb);
	spin_lock(&cmb_area.lock);

	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/* drop the lock: the page allocation may sleep */
		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
				get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			ret = -ENOMEM;
			goto out;
		} else {
			/* we won: install and activate the new area */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, CMF_ON);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev, cmb_data);
out:
	spin_unlock(&cmb_area.lock);
	if (ret) {
		kfree(cmb_data->last_block);
		kfree(cmb_data);
	}
	return ret;
}
0533
/*
 * Undo alloc_cmb(): free the per-device data, drop the device from the
 * measurement list and, once the last user is gone, deactivate the
 * facility and release the global area.
 */
static void free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	priv = cdev->private;
	cmb_data = priv->cmb;
	priv->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		/* last device gone: switch off and free the block array */
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, CMF_OFF);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
0560
0561 static int set_cmb(struct ccw_device *cdev, u32 mme)
0562 {
0563 u16 offset;
0564 struct cmb_data *cmb_data;
0565 unsigned long flags;
0566
0567 spin_lock_irqsave(cdev->ccwlock, flags);
0568 if (!cdev->private->cmb) {
0569 spin_unlock_irqrestore(cdev->ccwlock, flags);
0570 return -EINVAL;
0571 }
0572 cmb_data = cdev->private->cmb;
0573 offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
0574 spin_unlock_irqrestore(cdev->ccwlock, flags);
0575
0576 return set_schib_wait(cdev, mme, 0, offset);
0577 }
0578
0579
0580 static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
0581 u64 device_disconnect_time, u64 start_time)
0582 {
0583 u64 utilization, elapsed_time;
0584
0585 utilization = time_to_nsec(device_connect_time +
0586 function_pending_time +
0587 device_disconnect_time);
0588
0589 elapsed_time = get_tod_clock() - start_time;
0590 elapsed_time = tod_to_ns(elapsed_time);
0591 elapsed_time /= 1000;
0592
0593 return elapsed_time ? (utilization / elapsed_time) : 0;
0594 }
0595
/*
 * Read a single value from the basic-format hardware block of @cdev.
 *
 * Count values are returned raw, time values are averaged over
 * sample_count, avg_utilization is derived via __cmb_utilization().
 * Returns 0 for unknown indices or when measurement is not set up.
 */
static u64 read_cmb(struct ccw_device *cdev, int index)
{
	struct cmb_data *cmb_data;
	unsigned long flags;
	struct cmb *cmb;
	u64 ret = 0;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data)
		goto out;

	cmb = cmb_data->hw_block;
	switch (index) {
	case avg_utilization:
		ret = __cmb_utilization(cmb->device_connect_time,
					cmb->function_pending_time,
					cmb->device_disconnect_time,
					cdev->private->cmb_start_time);
		goto out;
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	default:
		goto out;
	}
	/* time values: average per sample, in nanoseconds */
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
0646
/*
 * Fill @data from the latest snapshot of the basic block, refreshing
 * the snapshot first via cmf_cmb_copy_wait().
 *
 * Returns -ENODEV when measurement is not set up, -EAGAIN when no
 * snapshot exists yet, 0 on success.
 */
static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmb *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* the block was never copied, nothing to report yet */
		ret = -EAGAIN;
		goto out;
	}
	cmb = cmb_data->last_block;
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset(data, 0, sizeof(struct cmbdata));

	/* the basic format only provides fields before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	data->elapsed_time = tod_to_ns(time);

	/* raw counters */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
0696
/* cmb_operations::reset callback for the basic format. */
static void reset_cmb(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
0701
0702 static int cmf_enabled(struct ccw_device *cdev)
0703 {
0704 int enabled;
0705
0706 spin_lock_irq(cdev->ccwlock);
0707 enabled = !!cdev->private->cmb;
0708 spin_unlock_irq(cdev->ccwlock);
0709
0710 return enabled;
0711 }
0712
/* defined further down, referenced by the basic operations below */
static struct attribute_group cmf_attr_group;

/* Backend for the basic measurement block format. */
static struct cmb_operations cmbops_basic = {
	.alloc = alloc_cmb,
	.free = free_cmb,
	.set = set_cmb,
	.read = read_cmb,
	.readall = readall_cmb,
	.reset = reset_cmb,
	.attr_group = &cmf_attr_group,
};
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
/*
 * struct cmbe - extended channel measurement block
 *
 * All counters are 32 bit wide and the block carries two extra fields
 * (device_busy_time, initial_command_response_time). Must be 64-byte
 * aligned, hence the dedicated slab cache below.
 */
struct cmbe {
	u32 ssch_rsch_count;		/* start/resume subchannel count */
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
} __packed __aligned(64);

/* slab cache guaranteeing the 64-byte alignment of struct cmbe */
static struct kmem_cache *cmbe_cache;
0759
/*
 * Allocate an extended measurement block (from the aligned slab cache)
 * plus the cmb_data bookkeeping and register the device on the global
 * measurement list.
 *
 * Returns -ENOMEM on allocation failure, -EBUSY when measurement is
 * already enabled for the device, 0 on success.
 */
static int alloc_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;
	struct cmbe *cmbe;
	int ret = -ENOMEM;

	cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
	if (!cmbe)
		return ret;

	cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
	if (!cmb_data)
		goto out_free;

	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
	if (!cmb_data->last_block)
		goto out_free;

	cmb_data->size = sizeof(*cmbe);
	cmb_data->hw_block = cmbe;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb)
		goto out_unlock;

	cdev->private->cmb = cmb_data;

	/* activate global measurement if this is the first channel */
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_ON);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);

	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	return 0;

out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	ret = -EBUSY;
	/* fall through to the common cleanup */
out_free:
	/* cmb_data may be NULL when its allocation failed */
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	kmem_cache_free(cmbe_cache, cmbe);

	return ret;
}
0809
/*
 * Undo alloc_cmbe(): release the per-device blocks, unlink the device
 * and deactivate the facility when the last user is gone.
 */
static void free_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	cdev->private->cmb = NULL;
	if (cmb_data) {
		kfree(cmb_data->last_block);
		kmem_cache_free(cmbe_cache, cmb_data->hw_block);
	}
	kfree(cmb_data);

	/* deactivate global measurement if this is the last channel */
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_OFF);
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
0831
0832 static int set_cmbe(struct ccw_device *cdev, u32 mme)
0833 {
0834 unsigned long mba;
0835 struct cmb_data *cmb_data;
0836 unsigned long flags;
0837
0838 spin_lock_irqsave(cdev->ccwlock, flags);
0839 if (!cdev->private->cmb) {
0840 spin_unlock_irqrestore(cdev->ccwlock, flags);
0841 return -EINVAL;
0842 }
0843 cmb_data = cdev->private->cmb;
0844 mba = mme ? (unsigned long) cmb_data->hw_block : 0;
0845 spin_unlock_irqrestore(cdev->ccwlock, flags);
0846
0847 return set_schib_wait(cdev, mme, 1, mba);
0848 }
0849
/*
 * Read a single value from the extended-format hardware block of @cdev.
 * Same semantics as read_cmb(), plus the two extended-only fields.
 */
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
	struct cmb_data *cmb_data;
	unsigned long flags;
	struct cmbe *cmb;
	u64 ret = 0;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data)
		goto out;

	cmb = cmb_data->hw_block;
	switch (index) {
	case avg_utilization:
		ret = __cmb_utilization(cmb->device_connect_time,
					cmb->function_pending_time,
					cmb->device_disconnect_time,
					cdev->private->cmb_start_time);
		goto out;
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	case cmb_device_busy_time:
		val = cmb->device_busy_time;
		break;
	case cmb_initial_command_response_time:
		val = cmb->initial_command_response_time;
		break;
	default:
		goto out;
	}
	/* time values: average per sample, in nanoseconds */
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
0906
/*
 * Fill @data from the latest snapshot of the extended block, refreshing
 * the snapshot first via cmf_cmb_copy_wait(). Same error semantics as
 * readall_cmb().
 */
static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* the block was never copied, nothing to report yet */
		ret = -EAGAIN;
		goto out;
	}
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset (data, 0, sizeof(struct cmbdata));

	/*
	 * NOTE(review): size still reports offsetof(device_busy_time)
	 * even though the extended fields below are filled in — kept
	 * as-is; confirm against consumers of struct cmbdata.
	 */
	data->size = offsetof(struct cmbdata, device_busy_time);

	data->elapsed_time = tod_to_ns(time);

	cmb = cmb_data->last_block;
	/* raw counters */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb->initial_command_response_time);

	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
0960
/* cmb_operations::reset callback for the extended format. */
static void reset_cmbe(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
0965
/* defined further down, referenced by the extended operations below */
static struct attribute_group cmf_attr_group_ext;

/* Backend for the extended measurement block format. */
static struct cmb_operations cmbops_extended = {
	.alloc = alloc_cmbe,
	.free = free_cmbe,
	.set = set_cmbe,
	.read = read_cmbe,
	.readall = readall_cmbe,
	.reset = reset_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
0977
0978 static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
0979 {
0980 return sprintf(buf, "%lld\n",
0981 (unsigned long long) cmf_read(to_ccwdev(dev), idx));
0982 }
0983
0984 static ssize_t cmb_show_avg_sample_interval(struct device *dev,
0985 struct device_attribute *attr,
0986 char *buf)
0987 {
0988 struct ccw_device *cdev = to_ccwdev(dev);
0989 unsigned long count;
0990 long interval;
0991
0992 count = cmf_read(cdev, cmb_sample_count);
0993 spin_lock_irq(cdev->ccwlock);
0994 if (count) {
0995 interval = get_tod_clock() - cdev->private->cmb_start_time;
0996 interval = tod_to_ns(interval);
0997 interval /= count;
0998 } else
0999 interval = -1;
1000 spin_unlock_irq(cdev->ccwlock);
1001 return sprintf(buf, "%ld\n", interval);
1002 }
1003
1004 static ssize_t cmb_show_avg_utilization(struct device *dev,
1005 struct device_attribute *attr,
1006 char *buf)
1007 {
1008 unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
1009
1010 return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
1011 }
1012
/* Generate a sysfs show function plus read-only attribute for a raw counter. */
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
			   struct device_attribute *attr, char *buf)	\
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);

/* Same, for the averaged time values ("avg_" prefix in sysfs). */
#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
			       struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
1024
/* Instantiate one device attribute per exported measurement value. */
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
		   NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
1038
/* Attributes exported in the "cmf" sysfs group for the basic format. */
static struct attribute *cmf_attributes[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group = {
	.name = "cmf",
	.attrs = cmf_attributes,
};
1056
/*
 * Attributes for the extended format: the basic set plus busy time and
 * initial command response time.
 */
static struct attribute *cmf_attributes_ext[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	&dev_attr_avg_device_busy_time.attr,
	&dev_attr_avg_initial_command_response_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group_ext = {
	.name = "cmf",
	.attrs = cmf_attributes_ext,
};
1076
1077 static ssize_t cmb_enable_show(struct device *dev,
1078 struct device_attribute *attr,
1079 char *buf)
1080 {
1081 struct ccw_device *cdev = to_ccwdev(dev);
1082
1083 return sprintf(buf, "%d\n", cmf_enabled(cdev));
1084 }
1085
1086 static ssize_t cmb_enable_store(struct device *dev,
1087 struct device_attribute *attr, const char *buf,
1088 size_t c)
1089 {
1090 struct ccw_device *cdev = to_ccwdev(dev);
1091 unsigned long val;
1092 int ret;
1093
1094 ret = kstrtoul(buf, 16, &val);
1095 if (ret)
1096 return ret;
1097
1098 switch (val) {
1099 case 0:
1100 ret = disable_cmf(cdev);
1101 break;
1102 case 1:
1103 ret = enable_cmf(cdev);
1104 break;
1105 default:
1106 ret = -EINVAL;
1107 }
1108
1109 return ret ? ret : c;
1110 }
1111 DEVICE_ATTR_RW(cmb_enable);
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
/**
 * enable_cmf() - switch on the channel measurement for a specific device
 * @cdev:	The ccw device to be enabled
 *
 * Allocates a measurement block, creates the sysfs attribute group and
 * activates measurement mode. If measurement is already enabled, only
 * the counters are reset. A device reference is held while measurement
 * is enabled.
 *
 * Returns: %0 for success or a negative error value.
 * Context: non-atomic
 */
int enable_cmf(struct ccw_device *cdev)
{
	int ret = 0;

	device_lock(&cdev->dev);
	if (cmf_enabled(cdev)) {
		/* already enabled: just restart the counters */
		cmbops->reset(cdev);
		goto out_unlock;
	}
	get_device(&cdev->dev);
	ret = cmbops->alloc(cdev);
	if (ret)
		goto out;
	cmbops->reset(cdev);
	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
	if (ret) {
		cmbops->free(cdev);
		goto out;
	}
	ret = cmbops->set(cdev, 2);	/* 2 == measurement mode on */
	if (ret) {
		/* roll back the sysfs group and the allocation */
		sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
		cmbops->free(cdev);
	}
out:
	if (ret)
		put_device(&cdev->dev);
out_unlock:
	device_unlock(&cdev->dev);
	return ret;
}
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165 int __disable_cmf(struct ccw_device *cdev)
1166 {
1167 int ret;
1168
1169 ret = cmbops->set(cdev, 0);
1170 if (ret)
1171 return ret;
1172
1173 sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
1174 cmbops->free(cdev);
1175 put_device(&cdev->dev);
1176
1177 return ret;
1178 }
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
/**
 * disable_cmf() - switch off the channel measurement for a specific device
 * @cdev:	The ccw device to be disabled
 *
 * Returns: %0 for success or a negative error value.
 * Context: non-atomic
 */
int disable_cmf(struct ccw_device *cdev)
{
	int ret;

	device_lock(&cdev->dev);
	ret = __disable_cmf(cdev);
	device_unlock(&cdev->dev);

	return ret;
}
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
/**
 * cmf_read() - read one value from the current channel measurement block
 * @cdev:	the channel to be read
 * @index:	the index of the value to be read (enum cmb_index)
 *
 * Returns: The value read or %0 if the value cannot be read.
 */
u64 cmf_read(struct ccw_device *cdev, int index)
{
	return cmbops->read(cdev, index);
}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/**
 * cmf_readall() - read the current channel measurement block
 * @cdev:	the channel to be read
 * @data:	a pointer to a data block that will be filled
 *
 * Returns: %0 on success, a negative error value otherwise.
 */
int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
	return cmbops->readall(cdev, data);
}
1229
1230
/*
 * Reset the counters and switch measurement back on for a device whose
 * block is still allocated; caller not visible in this file — presumably
 * the device recognition path, TODO confirm.
 */
int cmf_reenable(struct ccw_device *cdev)
{
	cmbops->reset(cdev);
	return cmbops->set(cdev, 2);
}
1236
1237
1238
1239
1240
1241
/*
 * Re-issue the activation of the measurement facility if any devices
 * are still registered for measurement (caller not visible here).
 */
void cmf_reactivate(void)
{
	spin_lock(&cmb_area.lock);
	if (!list_empty(&cmb_area.list))
		cmf_activate(cmb_area.mem, CMF_ON);
	spin_unlock(&cmb_area.lock);
}
1249
1250 static int __init init_cmbe(void)
1251 {
1252 cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
1253 __alignof__(struct cmbe), 0, NULL);
1254
1255 return cmbe_cache ? 0 : -ENOMEM;
1256 }
1257
1258 static int __init init_cmf(void)
1259 {
1260 char *format_string;
1261 char *detect_string;
1262 int ret;
1263
1264
1265
1266
1267
1268
1269 if (format == CMF_AUTODETECT) {
1270 if (!css_general_characteristics.ext_mb) {
1271 format = CMF_BASIC;
1272 } else {
1273 format = CMF_EXTENDED;
1274 }
1275 detect_string = "autodetected";
1276 } else {
1277 detect_string = "parameter";
1278 }
1279
1280 switch (format) {
1281 case CMF_BASIC:
1282 format_string = "basic";
1283 cmbops = &cmbops_basic;
1284 break;
1285 case CMF_EXTENDED:
1286 format_string = "extended";
1287 cmbops = &cmbops_extended;
1288
1289 ret = init_cmbe();
1290 if (ret)
1291 return ret;
1292 break;
1293 default:
1294 return -EINVAL;
1295 }
1296 pr_info("Channel measurement facility initialized using format "
1297 "%s (mode %s)\n", format_string, detect_string);
1298 return 0;
1299 }
1300 device_initcall(init_cmf);
1301
1302 EXPORT_SYMBOL_GPL(enable_cmf);
1303 EXPORT_SYMBOL_GPL(disable_cmf);
1304 EXPORT_SYMBOL_GPL(cmf_read);
1305 EXPORT_SYMBOL_GPL(cmf_readall);