0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <asm/vio.h>
0012 #include <asm/hvcall.h>
0013 #include <asm/vas.h>
0014
0015 #include "nx-842.h"
0016 #include "nx_csbcpb.h" /* struct nx_csbcpb */
0017
0018 MODULE_LICENSE("GPL");
0019 MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
0020 MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
0021 MODULE_ALIAS_CRYPTO("842");
0022 MODULE_ALIAS_CRYPTO("842-nx");
0023
0024
0025
0026
/*
 * NX-GZIP coprocessor capabilities as returned by the hypervisor
 * (H_QUERY_NX_CAPABILITIES); all fields are big-endian.
 */
struct hv_nx_cop_caps {
	__be64 descriptor;
	__be64 req_max_processed_len;	/* per-request maximum, see sysfs */
	__be64 min_compress_len;
	__be64 min_decompress_len;
} __packed __aligned(0x1000);

/*
 * CPU-endian copy of the capabilities above, filled in by
 * nxcop_get_capabilities() and exported through the nx_gzip_caps
 * sysfs group.
 */
struct nx_cop_caps {
	u64 descriptor;
	u64 req_max_processed_len;
	u64 min_compress_len;
	u64 min_decompress_len;
};

/* Hypervisor feature bits (0 when NX-GZIP is unavailable) */
static u64 caps_feat;
static struct nx_cop_caps nx_cop_caps;

/* Limits enforced on every compress/decompress buffer; .maximum may be
 * lowered at runtime from the ibm,max-sync-cop property. */
static struct nx842_constraints nx842_pseries_constraints = {
	.alignment =	DDE_BUFFER_ALIGN,
	.multiple =	DDE_BUFFER_LAST_MULT,
	.minimum =	DDE_BUFFER_LAST_MULT,
	.maximum =	PAGE_SIZE,
};
0053
0054 static int check_constraints(unsigned long buf, unsigned int *len, bool in)
0055 {
0056 if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
0057 pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
0058 in ? "input" : "output", buf,
0059 nx842_pseries_constraints.alignment);
0060 return -EINVAL;
0061 }
0062 if (*len % nx842_pseries_constraints.multiple) {
0063 pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
0064 in ? "input" : "output", *len,
0065 nx842_pseries_constraints.multiple);
0066 if (in)
0067 return -EINVAL;
0068 *len = round_down(*len, nx842_pseries_constraints.multiple);
0069 }
0070 if (*len < nx842_pseries_constraints.minimum) {
0071 pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
0072 in ? "input" : "output", *len,
0073 nx842_pseries_constraints.minimum);
0074 return -EINVAL;
0075 }
0076 if (*len > nx842_pseries_constraints.maximum) {
0077 pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
0078 in ? "input" : "output", *len,
0079 nx842_pseries_constraints.maximum);
0080 if (in)
0081 return -EINVAL;
0082 *len = nx842_pseries_constraints.maximum;
0083 }
0084 return 0;
0085 }
0086
0087
/* Alignment required for the per-request scratch area (see PTR_ALIGN use
 * in the compress/decompress paths). */
#define WORKMEM_ALIGN	(256)

/* Per-request scratch memory, carved out of the caller-provided wmem. */
struct nx842_workmem {
	/* Backing storage for the hardware scatterlist entries, cast to
	 * struct nx842_slentry arrays by the compress/decompress paths. */
	char slin[4096];
	char slout[4096];

	struct nx_csbcpb csbcpb;	/* hardware status/parameter block */

	/* Slack so PTR_ALIGN(wmem, WORKMEM_ALIGN) still leaves room for
	 * the fields above. */
	char padding[WORKMEM_ALIGN];
} __aligned(WORKMEM_ALIGN);
0099
0100
0101
/* CSB "valid" flag: bit 7 (0x80) of csb->valid is set when the hardware
 * has completed the request (checked in nx842_validate_result()). */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/*
 * Completion-extension bits of csb->completion_extension.  CE2 is tested
 * after a successful completion code to confirm the reported
 * processed_byte_count applies to the target buffer; CE0/CE1 are defined
 * here for completeness.
 */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* Hardware page granularity used when chunking buffers into scatterlist
 * entries (independent of the kernel PAGE_SIZE). */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))
0115
/* Operation statistics exported via sysfs (see nx842_attribute_group). */
struct ibm_nx842_counters {
	atomic64_t comp_complete;
	atomic64_t comp_failed;
	atomic64_t decomp_complete;
	atomic64_t decomp_failed;
	atomic64_t swdecomp;
	/* time histograms; only NX842_HIST_SLOTS entries are used */
	atomic64_t comp_times[32];
	atomic64_t decomp_times[32];
};

/*
 * Singleton device state, published via RCU.  Readers use
 * rcu_dereference() under rcu_read_lock(); updaters replace the whole
 * structure while holding devdata_mutex.
 */
static struct nx842_devdata {
	struct vio_dev *vdev;
	struct device *dev;
	struct ibm_nx842_counters *counters;
	unsigned int max_sg_len;	/* from ibm,max-sg-len, capped at 4K */
	unsigned int max_sync_size;	/* from ibm,max-sync-cop data limits */
	unsigned int max_sync_sg;	/* from ibm,max-sync-cop sg limits */
} __rcu *devdata;
/* Serializes devdata updaters (a spinlock despite the "_mutex" name). */
static DEFINE_SPINLOCK(devdata_mutex);
0135
/* Generate nx842_inc_<name>() helpers that bump one statistics counter;
 * a NULL devdata is tolerated (counter update silently skipped). */
#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);
0147
0148 #define NX842_HIST_SLOTS 16
0149
0150 static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
0151 {
0152 int bucket = fls(time);
0153
0154 if (bucket)
0155 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
0156
0157 atomic64_inc(×[bucket]);
0158 }
0159
0160
/* Operation flags placed in vio_pfo_op.flags for the coprocessor hcall. */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)
#define NX842_OP_NOTIFY		(1<<22)
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)

/* VIO bus callback: this driver requests no additional DMA entitlement. */
static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}
0175
/* One hardware scatterlist entry: address and length, big-endian. */
struct nx842_slentry {
	__be64 ptr;	/* address of the chunk (via nx842_get_pa()) */
	__be64 len;
};

/* Driver-side view of a hardware scatterlist. */
struct nx842_scatterlist {
	int entry_nr;			/* number of entries in use */
	struct nx842_slentry *entries;	/* ptr to entries buffer */
};

/* Size in bytes of the scatterlist entries currently in use. */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}
0193
0194 static int nx842_build_scatterlist(unsigned long buf, int len,
0195 struct nx842_scatterlist *sl)
0196 {
0197 unsigned long entrylen;
0198 struct nx842_slentry *entry;
0199
0200 sl->entry_nr = 0;
0201
0202 entry = sl->entries;
0203 while (len) {
0204 entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
0205 entrylen = min_t(int, len,
0206 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
0207 entry->len = cpu_to_be64(entrylen);
0208
0209 len -= entrylen;
0210 buf += entrylen;
0211
0212 sl->entry_nr++;
0213 entry++;
0214 }
0215
0216 return 0;
0217 }
0218
/*
 * Check the coprocessor status block after a synchronous operation.
 *
 * @dev: device pointer for diagnostics
 * @csb: the CSB written by the hardware
 *
 * Returns 0 on success, -ENOSPC when the output buffer was too small,
 * -EINVAL on CRC mismatch or corrupt input, -EIO otherwise.
 */
static int nx842_validate_result(struct device *dev,
			struct cop_status_block *csb)
{
	/* The hardware must have marked the CSB valid on completion. */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: cspcbp not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				be32_to_cpu(csb->processed_byte_count),
				(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Map hardware completion codes to errnos. */
	switch (csb->completion_code) {
	case 0:	/* Completed without error */
		break;
	case 64: /* Compression ok, but output was at least as big as input */
		dev_dbg(dev, "%s: output size larger than input size\n",
					__func__);
		break;
	case 13: /* Output buffer too small */
		dev_dbg(dev, "%s: Out of space in output buffer\n",
					__func__);
		return -ENOSPC;
	case 65: /* Hardware CRC check failed on decompress */
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
					__func__);
		return -EINVAL;
	case 66: /* Input data incorrect */
	case 67: /* Input data incorrect */
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* Hardware sanity check: CE2 must be set for processed_byte_count
	 * to describe the target buffer. */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcbp->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
0301
/**
 * nx842_pseries_compress - compress @in to @out using the 842 coprocessor
 *
 * @in: input buffer (must satisfy nx842_pseries_constraints)
 * @inlen: input buffer length
 * @out: output buffer
 * @outlen: in: output buffer length (may be rounded down/clamped);
 *          out: number of compressed bytes produced
 * @wmem: scratch memory of at least nx842_pseries_driver.workmem_size
 *        bytes; aligned internally to WORKMEM_ALIGN
 *
 * Returns 0 on success, -EINVAL on constraint violation or bad data,
 * -ENODEV when no device is registered, -ENOSPC/-EIO on hardware errors.
 */
static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int *outlen,
				  void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();	/* timestamp for the histogram */

	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	/* Init scatterlists from the per-request scratch area */
	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_COMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Input fits in one hardware page: direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Crosses a page boundary: indirect DDE (scatterlist);
		 * a negative length flags the indirect form. */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Output within one hardware page: direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Indirect DDE (scatterlist) for the output */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		ret = -EIO;
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);

unlock:
	/* Update statistics and the time histogram */
	if (ret)
		nx842_inc_comp_failed(local_devdata);
	else {
		nx842_inc_comp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}
	rcu_read_unlock();
	return ret;
}
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432 static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
0433 unsigned char *out, unsigned int *outlen,
0434 void *wmem)
0435 {
0436 struct nx842_devdata *local_devdata;
0437 struct device *dev = NULL;
0438 struct nx842_workmem *workmem;
0439 struct nx842_scatterlist slin, slout;
0440 struct nx_csbcpb *csbcpb;
0441 int ret = 0;
0442 unsigned long inbuf, outbuf;
0443 struct vio_pfo_op op = {
0444 .done = NULL,
0445 .handle = 0,
0446 .timeout = 0,
0447 };
0448 unsigned long start = get_tb();
0449
0450
0451 inbuf = (unsigned long)in;
0452 if (check_constraints(inbuf, &inlen, true))
0453 return -EINVAL;
0454
0455 outbuf = (unsigned long)out;
0456 if (check_constraints(outbuf, outlen, false))
0457 return -EINVAL;
0458
0459 rcu_read_lock();
0460 local_devdata = rcu_dereference(devdata);
0461 if (!local_devdata || !local_devdata->dev) {
0462 rcu_read_unlock();
0463 return -ENODEV;
0464 }
0465 dev = local_devdata->dev;
0466
0467 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
0468
0469
0470 slin.entries = (struct nx842_slentry *)workmem->slin;
0471 slout.entries = (struct nx842_slentry *)workmem->slout;
0472
0473
0474 op.flags = NX842_OP_DECOMPRESS_CRC;
0475 csbcpb = &workmem->csbcpb;
0476 memset(csbcpb, 0, sizeof(*csbcpb));
0477 op.csbcpb = nx842_get_pa(csbcpb);
0478
0479 if ((inbuf & NX842_HW_PAGE_MASK) ==
0480 ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
0481
0482 op.in = nx842_get_pa((void *)inbuf);
0483 op.inlen = inlen;
0484 } else {
0485
0486 nx842_build_scatterlist(inbuf, inlen, &slin);
0487 op.in = nx842_get_pa(slin.entries);
0488 op.inlen = -nx842_get_scatterlist_size(&slin);
0489 }
0490
0491 if ((outbuf & NX842_HW_PAGE_MASK) ==
0492 ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
0493
0494 op.out = nx842_get_pa((void *)outbuf);
0495 op.outlen = *outlen;
0496 } else {
0497
0498 nx842_build_scatterlist(outbuf, *outlen, &slout);
0499 op.out = nx842_get_pa(slout.entries);
0500 op.outlen = -nx842_get_scatterlist_size(&slout);
0501 }
0502
0503 dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
0504 __func__, (unsigned long)op.in, (long)op.inlen,
0505 (unsigned long)op.out, (long)op.outlen);
0506
0507
0508 ret = vio_h_cop_sync(local_devdata->vdev, &op);
0509
0510
0511 if (ret) {
0512 dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
0513 __func__, ret, op.hcall_err);
0514 goto unlock;
0515 }
0516
0517
0518 ret = nx842_validate_result(dev, &csbcpb->csb);
0519 if (ret)
0520 goto unlock;
0521
0522 *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
0523
0524 unlock:
0525 if (ret)
0526
0527 nx842_inc_decomp_failed(local_devdata);
0528 else {
0529 nx842_inc_decomp_complete(local_devdata);
0530 ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
0531 (get_tb() - start) / tb_ticks_per_usec);
0532 }
0533
0534 rcu_read_unlock();
0535 return ret;
0536 }
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547 static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
0548 {
0549 if (devdata) {
0550 devdata->max_sync_size = 0;
0551 devdata->max_sync_sg = 0;
0552 devdata->max_sg_len = 0;
0553 return 0;
0554 } else
0555 return -ENOENT;
0556 }
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573 static int nx842_OF_upd_status(struct nx842_devdata *devdata,
0574 struct property *prop)
0575 {
0576 const char *status = (const char *)prop->value;
0577
0578 if (!strncmp(status, "okay", (size_t)prop->length))
0579 return 0;
0580 if (!strncmp(status, "disabled", (size_t)prop->length))
0581 return -ENODEV;
0582 dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
0583
0584 return -EINVAL;
0585 }
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608 static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
0609 struct property *prop) {
0610 int ret = 0;
0611 const unsigned int maxsglen = of_read_number(prop->value, 1);
0612
0613 if (prop->length != sizeof(maxsglen)) {
0614 dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
0615 dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
0616 prop->length, sizeof(maxsglen));
0617 ret = -EINVAL;
0618 } else {
0619 devdata->max_sg_len = min_t(unsigned int,
0620 maxsglen, NX842_HW_PAGE_SIZE);
0621 }
0622
0623 return ret;
0624 }
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
/*
 * Parse the "ibm,max-sync-cop" property (six be32 cells describing the
 * compression and decompression limits) and update the devdata and the
 * global constraints accordingly.
 *
 * Returns 0 on success, -EINVAL for a malformed property or limits too
 * small for the driver to operate.
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	const struct maxsynccop_t {
		__be32 comp_elements;
		__be32 comp_data_limit;
		__be32 comp_sg_limit;
		__be32 decomp_elements;
		__be32 decomp_data_limit;
		__be32 decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
			sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/* A single limit is used for both directions, so take the
	 * smaller of the compression and decompression data limits. */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	/* Cap at 64K regardless of what the hardware advertises. */
	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
					65536);

	/* The driver needs at least one 4K page per operation. */
	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	/* Publish the new upper bound to the buffer-constraint checks. */
	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735
0736 static int nx842_OF_upd(struct property *new_prop)
0737 {
0738 struct nx842_devdata *old_devdata = NULL;
0739 struct nx842_devdata *new_devdata = NULL;
0740 struct device_node *of_node = NULL;
0741 struct property *status = NULL;
0742 struct property *maxsglen = NULL;
0743 struct property *maxsyncop = NULL;
0744 int ret = 0;
0745 unsigned long flags;
0746
0747 new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
0748 if (!new_devdata)
0749 return -ENOMEM;
0750
0751 spin_lock_irqsave(&devdata_mutex, flags);
0752 old_devdata = rcu_dereference_check(devdata,
0753 lockdep_is_held(&devdata_mutex));
0754 if (old_devdata)
0755 of_node = old_devdata->dev->of_node;
0756
0757 if (!old_devdata || !of_node) {
0758 pr_err("%s: device is not available\n", __func__);
0759 spin_unlock_irqrestore(&devdata_mutex, flags);
0760 kfree(new_devdata);
0761 return -ENODEV;
0762 }
0763
0764 memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
0765 new_devdata->counters = old_devdata->counters;
0766
0767
0768 status = of_find_property(of_node, "status", NULL);
0769 maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
0770 maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
0771 if (!status || !maxsglen || !maxsyncop) {
0772 dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
0773 ret = -EINVAL;
0774 goto error_out;
0775 }
0776
0777
0778
0779
0780
0781 if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
0782 strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
0783 strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
0784 goto out;
0785
0786
0787 ret = nx842_OF_upd_status(new_devdata, status);
0788 if (ret)
0789 goto error_out;
0790
0791 ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
0792 if (ret)
0793 goto error_out;
0794
0795 ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
0796 if (ret)
0797 goto error_out;
0798
0799 out:
0800 dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
0801 __func__, new_devdata->max_sync_size,
0802 old_devdata->max_sync_size);
0803 dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
0804 __func__, new_devdata->max_sync_sg,
0805 old_devdata->max_sync_sg);
0806 dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
0807 __func__, new_devdata->max_sg_len,
0808 old_devdata->max_sg_len);
0809
0810 rcu_assign_pointer(devdata, new_devdata);
0811 spin_unlock_irqrestore(&devdata_mutex, flags);
0812 synchronize_rcu();
0813 dev_set_drvdata(new_devdata->dev, new_devdata);
0814 kfree(old_devdata);
0815 return 0;
0816
0817 error_out:
0818 if (new_devdata) {
0819 dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
0820 nx842_OF_set_defaults(new_devdata);
0821 rcu_assign_pointer(devdata, new_devdata);
0822 spin_unlock_irqrestore(&devdata_mutex, flags);
0823 synchronize_rcu();
0824 dev_set_drvdata(new_devdata->dev, new_devdata);
0825 kfree(old_devdata);
0826 } else {
0827 dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
0828 spin_unlock_irqrestore(&devdata_mutex, flags);
0829 }
0830
0831 if (!ret)
0832 ret = -EINVAL;
0833 return ret;
0834 }
0835
0836
0837
0838
0839
0840
0841
0842
0843
0844
0845
0846
0847
/*
 * OF reconfig notifier: when a property on our device node is updated,
 * re-read the device configuration via nx842_OF_upd().
 *
 * Always returns NOTIFY_OK.
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *data)
{
	struct of_reconfig_data *upd = data;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	/* Only react to property updates on our own node (matched by
	 * node name). */
	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		/* Drop the RCU lock before nx842_OF_upd(), which blocks
		 * (allocates and calls synchronize_rcu()). */
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};
0874
/* Generate a sysfs show() that prints one statistics counter; shows
 * nothing (returns 0) when no device is registered. */
#define nx842_counter_read(_name)					\
static ssize_t nx842_##_name##_show(struct device *dev,		\
		struct device_attribute *attr,				\
		char *buf) {						\
	struct nx842_devdata *local_devdata;			\
	int p = 0;							\
	rcu_read_lock();						\
	local_devdata = rcu_dereference(devdata);			\
	if (local_devdata)						\
		p = snprintf(buf, PAGE_SIZE, "%lld\n",			\
		       atomic64_read(&local_devdata->counters->_name));	\
	rcu_read_unlock();						\
	return p;							\
}

/* Read-only (0444) device attribute wrapping the generated show(). */
#define NX842DEV_COUNTER_ATTR_RO(_name)					\
	nx842_counter_read(_name);					\
	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
						0444,			\
						nx842_##_name##_show,\
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

/* Both histogram attributes share one show(); it distinguishes them by
 * comparing the attribute pointer (see nx842_timehist_show). */
static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);
0910
0911 static ssize_t nx842_timehist_show(struct device *dev,
0912 struct device_attribute *attr, char *buf) {
0913 char *p = buf;
0914 struct nx842_devdata *local_devdata;
0915 atomic64_t *times;
0916 int bytes_remain = PAGE_SIZE;
0917 int bytes;
0918 int i;
0919
0920 rcu_read_lock();
0921 local_devdata = rcu_dereference(devdata);
0922 if (!local_devdata) {
0923 rcu_read_unlock();
0924 return 0;
0925 }
0926
0927 if (attr == &dev_attr_comp_times)
0928 times = local_devdata->counters->comp_times;
0929 else if (attr == &dev_attr_decomp_times)
0930 times = local_devdata->counters->decomp_times;
0931 else {
0932 rcu_read_unlock();
0933 return 0;
0934 }
0935
0936 for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
0937 bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
0938 i ? (2<<(i-1)) : 0, (2<<i)-1,
0939 atomic64_read(×[i]));
0940 bytes_remain -= bytes;
0941 p += bytes;
0942 }
0943
0944
0945 bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
0946 2<<(NX842_HIST_SLOTS - 2),
0947 atomic64_read(×[(NX842_HIST_SLOTS - 1)]));
0948 p += bytes;
0949
0950 rcu_read_unlock();
0951 return p - buf;
0952 }
0953
/* Statistics attributes created on the VIO device at probe time. */
static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static const struct attribute_group nx842_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = nx842_sysfs_entries,
};
0969
/* Generate a show() for one NX-GZIP capability value captured at module
 * init in nx_cop_caps.  NOTE(review): "%lld" with a u64 field — works on
 * the targeted 64-bit platform but "%llu" would match the type; confirm. */
#define nxcop_caps_read(_name)						\
static ssize_t nxcop_##_name##_show(struct device *dev,		\
			struct device_attribute *attr, char *buf)	\
{									\
	return sprintf(buf, "%lld\n", nx_cop_caps._name);		\
}

/* Read-only (0444) attribute for a capability value. */
#define NXCT_ATTR_RO(_name)						\
	nxcop_caps_read(_name);						\
	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
						0444,			\
						nxcop_##_name##_show,	\
						NULL);

NXCT_ATTR_RO(req_max_processed_len);
NXCT_ATTR_RO(min_compress_len);
NXCT_ATTR_RO(min_decompress_len);

static struct attribute *nxcop_caps_sysfs_entries[] = {
	&dev_attr_req_max_processed_len.attr,
	&dev_attr_min_compress_len.attr,
	&dev_attr_min_decompress_len.attr,
	NULL,
};

/* Created under the device as "nx_gzip_caps" when caps_feat is set. */
static const struct attribute_group nxcop_caps_attr_group = {
	.name	=	"nx_gzip_caps",
	.attrs	=	nxcop_caps_sysfs_entries,
};
0999
/* Driver operations handed to the shared nx-842 crypto glue. */
static struct nx842_driver nx842_pseries_driver = {
	.name =		KBUILD_MODNAME,
	.owner =	THIS_MODULE,
	.workmem_size =	sizeof(struct nx842_workmem),
	.constraints =	&nx842_pseries_constraints,
	.compress =	nx842_pseries_compress,
	.decompress =	nx842_pseries_decompress,
};

/* Bind this driver's ops into the shared nx842 crypto context. */
static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_pseries_driver);
}

/* "842" compression algorithm registered with the crypto API. */
static struct crypto_alg nx842_pseries_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_pseries_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
	.coa_compress		= nx842_crypto_compress,
	.coa_decompress		= nx842_crypto_decompress } }
};
1027
/*
 * VIO probe: allocate and publish the singleton devdata, read the
 * device-tree configuration, register the crypto algorithm and create
 * the sysfs groups.  Only one hardware instance is supported.
 */
static int nx842_probe(struct vio_dev *viodev,
		       const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
			GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	/* A devdata with a vdev means a device is already bound. */
	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	/* Publish the new devdata; free the placeholder allocated at
	 * module init after an RCU grace period. */
	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	/* Read configuration from the device tree. */
	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	/* Expose NX-GZIP capabilities only when the hypervisor reported
	 * the feature at module init. */
	if (caps_feat) {
		if (sysfs_create_group(&viodev->dev.kobj,
					&nxcop_caps_attr_group)) {
			dev_err(&viodev->dev,
				"Could not create sysfs NX capability entries\n");
			ret = -1;
			goto error;
		}
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	/* NOTE(review): on this path the reconfig notifier (and possibly
	 * the published devdata/crypto alg) remain registered even though
	 * probe fails — confirm whether cleanup is intended here. */
	return ret;
}
1109
/*
 * VIO remove: tear down sysfs, the crypto algorithm and the reconfig
 * notifier, then retire the published devdata after an RCU grace period.
 */
static void nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	if (caps_feat)
		sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	/* Wait for all RCU readers before freeing the old state. */
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);
}
1135
1136
1137
1138
1139
1140
1141 static void __init nxcop_get_capabilities(void)
1142 {
1143 struct hv_vas_all_caps *hv_caps;
1144 struct hv_nx_cop_caps *hv_nxc;
1145 int rc;
1146
1147 hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
1148 if (!hv_caps)
1149 return;
1150
1151
1152
1153 rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
1154 (u64)virt_to_phys(hv_caps));
1155 if (rc)
1156 goto out;
1157
1158 caps_feat = be64_to_cpu(hv_caps->feat_type);
1159
1160
1161
1162 if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
1163 hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
1164 if (!hv_nxc)
1165 goto out;
1166
1167
1168
1169 rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
1170 VAS_NX_GZIP_FEAT,
1171 (u64)virt_to_phys(hv_nxc));
1172 } else {
1173 pr_err("NX-GZIP feature is not available\n");
1174 rc = -EINVAL;
1175 }
1176
1177 if (!rc) {
1178 nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
1179 nx_cop_caps.req_max_processed_len =
1180 be64_to_cpu(hv_nxc->req_max_processed_len);
1181 nx_cop_caps.min_compress_len =
1182 be64_to_cpu(hv_nxc->min_compress_len);
1183 nx_cop_caps.min_decompress_len =
1184 be64_to_cpu(hv_nxc->min_decompress_len);
1185 } else {
1186 caps_feat = 0;
1187 }
1188
1189 kfree(hv_nxc);
1190 out:
1191 kfree(hv_caps);
1192 }
1193
/* Devices this VIO driver binds to. */
static const struct vio_device_id nx842_vio_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};
MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);

static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};
1207
1208 static int __init nx842_pseries_init(void)
1209 {
1210 struct nx842_devdata *new_devdata;
1211 struct device_node *np;
1212 int ret;
1213
1214 np = of_find_compatible_node(NULL, NULL, "ibm,compression");
1215 if (!np)
1216 return -ENODEV;
1217 of_node_put(np);
1218
1219 RCU_INIT_POINTER(devdata, NULL);
1220 new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
1221 if (!new_devdata)
1222 return -ENOMEM;
1223
1224 RCU_INIT_POINTER(devdata, new_devdata);
1225
1226
1227
1228 nxcop_get_capabilities();
1229
1230 ret = vio_register_driver(&nx842_vio_driver);
1231 if (ret) {
1232 pr_err("Could not register VIO driver %d\n", ret);
1233
1234 kfree(new_devdata);
1235 return ret;
1236 }
1237
1238 ret = vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
1239 "nx-gzip");
1240
1241 if (ret)
1242 pr_err("NX-GZIP is not supported. Returned=%d\n", ret);
1243
1244 return 0;
1245 }
1246
1247 module_init(nx842_pseries_init);
1248
/*
 * Module exit: unregister the VAS API and the crypto algorithm, retire
 * the published devdata after an RCU grace period, then unregister the
 * VIO driver.
 */
static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	vas_unregister_api_pseries();

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	/* Wait for all RCU readers before freeing the old state. */
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_vio_driver);
}

module_exit(nx842_pseries_exit);
1271