0001
0002
0003
0004
0005
0006
0007
0008 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0009
0010 #include <linux/types.h>
0011 #include <linux/module.h>
0012 #include <linux/device.h>
0013 #include <linux/io.h>
0014 #include <linux/mm.h>
0015 #include <linux/slab.h>
0016 #include <linux/bitmap.h>
0017 #include <linux/pm_runtime.h>
0018
0019 #include "intel_th.h"
0020 #include "gth.h"
0021
0022 struct gth_device;
0023
0024
0025
0026
0027
0028
0029
0030
0031
/**
 * struct gth_output - GTH view of one output port
 * @gth:	backlink to the GTH device
 * @output:	link to the assigned output device's descriptor
 *		(NULL while no output device is assigned to this port)
 * @index:	output port number
 * @port_type:	one of the GTH_* port type values read from hardware
 * @master:	bitmap of masters configured to route to this output
 */
struct gth_output {
	struct gth_device *gth;
	struct intel_th_output *output;
	unsigned int index;
	unsigned int port_type;
	DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1);
};
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
/**
 * struct gth_device - GTH device state
 * @dev:		driver core's device
 * @base:		MMIO register window base
 * @output_group:	attributes for the "outputs" sysfs group
 * @master_group:	attributes for the "masters" sysfs group
 * @output:		output ports
 * @master:		master-to-output-port assignments; -1 means disabled
 * @gth_lock:		serializes accesses to GTH software state
 */
struct gth_device {
	struct device *dev;
	void __iomem *base;

	struct attribute_group output_group;
	struct attribute_group master_group;
	struct gth_output output[TH_POSSIBLE_OUTPUTS];
	signed char master[TH_CONFIGURABLE_MASTERS + 1];
	spinlock_t gth_lock;
};
0060
/**
 * gth_output_set() - write one output port's 8-bit configuration field
 * @gth:	GTH device
 * @port:	output port number
 * @config:	configuration byte to write for @port
 *
 * Four ports share each 32-bit GTHOPT register: ports 0-3 live in
 * GTHOPT0, ports 4-7 in GTHOPT1, one byte per port.
 */
static void gth_output_set(struct gth_device *gth, int port,
			   unsigned int config)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= ~(0xff << shift);
	val |= config << shift;
	iowrite32(val, gth->base + reg);
}
0073
/**
 * gth_output_get() - read one output port's 8-bit configuration field
 * @gth:	GTH device
 * @port:	output port number
 *
 * Return: the config byte for @port from GTHOPT0 (ports 0-3) or
 * GTHOPT1 (ports 4-7).
 */
static unsigned int gth_output_get(struct gth_device *gth, int port)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= 0xff << shift;
	val >>= shift;

	return val;
}
0086
/**
 * gth_smcfreq_set() - write one output port's 16-bit SMCFREQ field
 * @gth:	GTH device
 * @port:	output port number
 * @freq:	frequency value to write
 *
 * Two ports share each 32-bit SMCR register: the even-numbered port
 * occupies the low 16 bits, the odd-numbered port the high 16 bits.
 */
static void gth_smcfreq_set(struct gth_device *gth, int port,
			    unsigned int freq)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= ~(0xffff << shift);
	val |= freq << shift;
	iowrite32(val, gth->base + reg);
}
0099
/**
 * gth_smcfreq_get() - read one output port's 16-bit SMCFREQ field
 * @gth:	GTH device
 * @port:	output port number
 *
 * Return: the 16-bit frequency value for @port from its SMCR register
 * (two ports per register, see gth_smcfreq_set()).
 */
static unsigned int gth_smcfreq_get(struct gth_device *gth, int port)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= 0xffff << shift;
	val >>= shift;

	return val;
}
0112
0113
0114
0115
0116
/**
 * struct master_attribute - sysfs attribute for one trace master
 * @attr:	device attribute, embedded
 * @gth:	backlink to the GTH device
 * @master:	number of the master this attribute controls
 */
struct master_attribute {
	struct device_attribute attr;
	struct gth_device *gth;
	unsigned int master;
};
0122
/*
 * Program the hardware routing for @master: route it to output @port,
 * or disable its routing when @port is negative.
 *
 * Each SWDEST register packs eight 4-bit destination fields (bit 3 is
 * the enable bit, bits 0-2 the output port); masters at and above 256
 * are controlled collectively via the GSWTDEST register.
 */
static void
gth_master_set(struct gth_device *gth, unsigned int master, int port)
{
	unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
	unsigned int shift = (master & 0x7) * 4;
	u32 val;

	if (master >= 256) {
		reg = REG_GTH_GSWTDEST;
		shift = 0;
	}

	val = ioread32(gth->base + reg);
	val &= ~(0xf << shift);
	if (port >= 0)
		val |= (0x8 | port) << shift;
	iowrite32(val, gth->base + reg);
}
0141
0142 static ssize_t master_attr_show(struct device *dev,
0143 struct device_attribute *attr,
0144 char *buf)
0145 {
0146 struct master_attribute *ma =
0147 container_of(attr, struct master_attribute, attr);
0148 struct gth_device *gth = ma->gth;
0149 size_t count;
0150 int port;
0151
0152 spin_lock(>h->gth_lock);
0153 port = gth->master[ma->master];
0154 spin_unlock(>h->gth_lock);
0155
0156 if (port >= 0)
0157 count = snprintf(buf, PAGE_SIZE, "%x\n", port);
0158 else
0159 count = snprintf(buf, PAGE_SIZE, "disabled\n");
0160
0161 return count;
0162 }
0163
0164 static ssize_t master_attr_store(struct device *dev,
0165 struct device_attribute *attr,
0166 const char *buf, size_t count)
0167 {
0168 struct master_attribute *ma =
0169 container_of(attr, struct master_attribute, attr);
0170 struct gth_device *gth = ma->gth;
0171 int old_port, port;
0172
0173 if (kstrtoint(buf, 10, &port) < 0)
0174 return -EINVAL;
0175
0176 if (port >= TH_POSSIBLE_OUTPUTS || port < -1)
0177 return -EINVAL;
0178
0179 spin_lock(>h->gth_lock);
0180
0181
0182 old_port = gth->master[ma->master];
0183 if (old_port >= 0) {
0184 gth->master[ma->master] = -1;
0185 clear_bit(ma->master, gth->output[old_port].master);
0186
0187
0188
0189
0190
0191 if (gth->output[old_port].output->active)
0192 gth_master_set(gth, ma->master, -1);
0193 }
0194
0195
0196 if (port >= 0) {
0197
0198 if (!gth->output[port].output) {
0199 count = -ENODEV;
0200 goto unlock;
0201 }
0202
0203 set_bit(ma->master, gth->output[port].master);
0204
0205
0206 if (gth->output[port].output->active)
0207 gth_master_set(gth, ma->master, port);
0208 }
0209
0210 gth->master[ma->master] = port;
0211
0212 unlock:
0213 spin_unlock(>h->gth_lock);
0214
0215 return count;
0216 }
0217
/**
 * struct output_attribute - sysfs attribute for one output parameter
 * @attr:	device attribute, embedded
 * @gth:	backlink to the GTH device
 * @port:	output port number
 * @parm:	index into output_parms[] of the parameter to show/store
 */
struct output_attribute {
	struct device_attribute attr;
	struct gth_device *gth;
	unsigned int port;
	unsigned int parm;
};
0224
/*
 * Table of per-port output parameters: each entry names a parameter
 * (used as the sysfs file name suffix), its accessor pair, the bit
 * mask of the parameter within the accessor's field and whether it is
 * readable/writable via sysfs.
 */
#define OUTPUT_PARM(_name, _mask, _r, _w, _what)			\
	[TH_OUTPUT_PARM(_name)] = { .name = __stringify(_name),		\
			.get = gth_ ## _what ## _get,			\
			.set = gth_ ## _what ## _set,			\
			.mask = (_mask),				\
			.readable = (_r),				\
			.writable = (_w) }

static const struct output_parm {
	const char *name;
	unsigned int (*get)(struct gth_device *gth, int port);
	void (*set)(struct gth_device *gth, int port,
		    unsigned int val);
	unsigned int mask;
	unsigned int readable : 1,
		writable : 1;
} output_parms[] = {
	OUTPUT_PARM(port, 0x7, 1, 0, output),
	OUTPUT_PARM(null, BIT(3), 1, 1, output),
	OUTPUT_PARM(drop, BIT(4), 1, 1, output),
	OUTPUT_PARM(reset, BIT(5), 1, 0, output),
	OUTPUT_PARM(flush, BIT(7), 0, 1, output),
	OUTPUT_PARM(smcfreq, 0xffff, 1, 1, smcfreq),
};
0249
0250 static void
0251 gth_output_parm_set(struct gth_device *gth, int port, unsigned int parm,
0252 unsigned int val)
0253 {
0254 unsigned int config = output_parms[parm].get(gth, port);
0255 unsigned int mask = output_parms[parm].mask;
0256 unsigned int shift = __ffs(mask);
0257
0258 config &= ~mask;
0259 config |= (val << shift) & mask;
0260 output_parms[parm].set(gth, port, config);
0261 }
0262
0263 static unsigned int
0264 gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
0265 {
0266 unsigned int config = output_parms[parm].get(gth, port);
0267 unsigned int mask = output_parms[parm].mask;
0268 unsigned int shift = __ffs(mask);
0269
0270 config &= mask;
0271 config >>= shift;
0272 return config;
0273 }
0274
0275
0276
0277
/**
 * intel_th_gth_reset() - hardware reset of GTH module
 * @gth:	GTH device
 *
 * This performs a hardware reset of the GTH module: clears all output
 * port configurations, destination overrides, master routings and sets
 * up the CTS for a single trigger.
 *
 * Return: 0 on success, -EBUSY if the switch is claimed by an external
 * debugger (no reset is performed in that case).
 */
static int intel_th_gth_reset(struct gth_device *gth)
{
	u32 reg;
	int port, i;

	reg = ioread32(gth->base + REG_GTH_SCRPD0);
	if (reg & SCRPD_DEBUGGER_IN_USE)
		return -EBUSY;

	/* claim the STH and trigger units in the scratchpad register */
	reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
	iowrite32(reg, gth->base + REG_GTH_SCRPD0);

	/* output ports: zero the config, reset SMCFREQ to 16 */
	for (port = 0; port < 8; port++) {
		if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
		    GTH_NONE)
			continue;

		gth_output_set(gth, port, 0);
		gth_smcfreq_set(gth, port, 16);
	}
	/* disable overrides */
	iowrite32(0, gth->base + REG_GTH_DESTOVR);

	/* masters swdest_0~31 and gswdest */
	for (i = 0; i < 33; i++)
		iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4);

	/* sources */
	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);

	/* setup CTS for single trigger */
	iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
	iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
		  CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);

	return 0;
}
0318
0319
0320
0321
0322
0323 static ssize_t output_attr_show(struct device *dev,
0324 struct device_attribute *attr,
0325 char *buf)
0326 {
0327 struct output_attribute *oa =
0328 container_of(attr, struct output_attribute, attr);
0329 struct gth_device *gth = oa->gth;
0330 size_t count;
0331
0332 pm_runtime_get_sync(dev);
0333
0334 spin_lock(>h->gth_lock);
0335 count = snprintf(buf, PAGE_SIZE, "%x\n",
0336 gth_output_parm_get(gth, oa->port, oa->parm));
0337 spin_unlock(>h->gth_lock);
0338
0339 pm_runtime_put(dev);
0340
0341 return count;
0342 }
0343
0344 static ssize_t output_attr_store(struct device *dev,
0345 struct device_attribute *attr,
0346 const char *buf, size_t count)
0347 {
0348 struct output_attribute *oa =
0349 container_of(attr, struct output_attribute, attr);
0350 struct gth_device *gth = oa->gth;
0351 unsigned int config;
0352
0353 if (kstrtouint(buf, 16, &config) < 0)
0354 return -EINVAL;
0355
0356 pm_runtime_get_sync(dev);
0357
0358 spin_lock(>h->gth_lock);
0359 gth_output_parm_set(gth, oa->port, oa->parm, config);
0360 spin_unlock(>h->gth_lock);
0361
0362 pm_runtime_put(dev);
0363
0364 return count;
0365 }
0366
0367 static int intel_th_master_attributes(struct gth_device *gth)
0368 {
0369 struct master_attribute *master_attrs;
0370 struct attribute **attrs;
0371 int i, nattrs = TH_CONFIGURABLE_MASTERS + 2;
0372
0373 attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
0374 if (!attrs)
0375 return -ENOMEM;
0376
0377 master_attrs = devm_kcalloc(gth->dev, nattrs,
0378 sizeof(struct master_attribute),
0379 GFP_KERNEL);
0380 if (!master_attrs)
0381 return -ENOMEM;
0382
0383 for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) {
0384 char *name;
0385
0386 name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d%s", i,
0387 i == TH_CONFIGURABLE_MASTERS ? "+" : "");
0388 if (!name)
0389 return -ENOMEM;
0390
0391 master_attrs[i].attr.attr.name = name;
0392 master_attrs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
0393 master_attrs[i].attr.show = master_attr_show;
0394 master_attrs[i].attr.store = master_attr_store;
0395
0396 sysfs_attr_init(&master_attrs[i].attr.attr);
0397 attrs[i] = &master_attrs[i].attr.attr;
0398
0399 master_attrs[i].gth = gth;
0400 master_attrs[i].master = i;
0401 }
0402
0403 gth->master_group.name = "masters";
0404 gth->master_group.attrs = attrs;
0405
0406 return sysfs_create_group(>h->dev->kobj, >h->master_group);
0407 }
0408
0409 static int intel_th_output_attributes(struct gth_device *gth)
0410 {
0411 struct output_attribute *out_attrs;
0412 struct attribute **attrs;
0413 int i, j, nouts = TH_POSSIBLE_OUTPUTS;
0414 int nparms = ARRAY_SIZE(output_parms);
0415 int nattrs = nouts * nparms + 1;
0416
0417 attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
0418 if (!attrs)
0419 return -ENOMEM;
0420
0421 out_attrs = devm_kcalloc(gth->dev, nattrs,
0422 sizeof(struct output_attribute),
0423 GFP_KERNEL);
0424 if (!out_attrs)
0425 return -ENOMEM;
0426
0427 for (i = 0; i < nouts; i++) {
0428 for (j = 0; j < nparms; j++) {
0429 unsigned int idx = i * nparms + j;
0430 char *name;
0431
0432 name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d_%s", i,
0433 output_parms[j].name);
0434 if (!name)
0435 return -ENOMEM;
0436
0437 out_attrs[idx].attr.attr.name = name;
0438
0439 if (output_parms[j].readable) {
0440 out_attrs[idx].attr.attr.mode |= S_IRUGO;
0441 out_attrs[idx].attr.show = output_attr_show;
0442 }
0443
0444 if (output_parms[j].writable) {
0445 out_attrs[idx].attr.attr.mode |= S_IWUSR;
0446 out_attrs[idx].attr.store = output_attr_store;
0447 }
0448
0449 sysfs_attr_init(&out_attrs[idx].attr.attr);
0450 attrs[idx] = &out_attrs[idx].attr.attr;
0451
0452 out_attrs[idx].gth = gth;
0453 out_attrs[idx].port = i;
0454 out_attrs[idx].parm = j;
0455 }
0456 }
0457
0458 gth->output_group.name = "outputs";
0459 gth->output_group.attrs = attrs;
0460
0461 return sysfs_create_group(>h->dev->kobj, >h->output_group);
0462 }
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
/**
 * intel_th_gth_stop() - stop tracing to an output device
 * @gth:		GTH device
 * @output:		output device's descriptor
 * @capture_done:	set when no more traces will be captured
 *
 * This stops tracing via the source control registers and then waits,
 * bounded by GTH_PLE_WAITLOOP_DEPTH iterations, for the pipeline of
 * the corresponding output port to drain.
 */
static void intel_th_gth_stop(struct gth_device *gth,
			      struct intel_th_output *output,
			      bool capture_done)
{
	struct intel_th_device *outdev =
		container_of(output, struct intel_th_device, output);
	struct intel_th_driver *outdrv =
		to_intel_th_driver(outdev->dev.driver);
	unsigned long count;
	u32 reg;
	/* bit 0: signal "capture done" when no more captures will follow */
	u32 scr2 = 0xfc | (capture_done ? 1 : 0);

	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(scr2, gth->base + REG_GTH_SCR2);

	/* wait on pipeline empty for the given port */
	for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
	     count && !(reg & BIT(output->port)); count--) {
		reg = ioread32(gth->base + REG_GTH_STAT);
		cpu_relax();
	}

	if (!count)
		dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
			output->port);

	/* let the output device drain its own pipeline, if it can */
	if (outdrv->wait_empty)
		outdrv->wait_empty(outdev);

	/* clear force capture done for next captures */
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);
}
0506
0507
0508
0509
0510
0511
0512
0513
/**
 * intel_th_gth_start() - start tracing to an output device
 * @gth:	GTH device
 * @output:	output device's descriptor
 *
 * This starts tracing via the source control registers; the low byte
 * of SCR is additionally set when the output runs in multiblock mode.
 */
static void intel_th_gth_start(struct gth_device *gth,
			       struct intel_th_output *output)
{
	u32 scr = 0xfc0000;

	if (output->multiblock)
		scr |= 0xff;

	iowrite32(scr, gth->base + REG_GTH_SCR);
	iowrite32(0, gth->base + REG_GTH_SCR2);
}
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535 static void intel_th_gth_disable(struct intel_th_device *thdev,
0536 struct intel_th_output *output)
0537 {
0538 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
0539 int master;
0540 u32 reg;
0541
0542 spin_lock(>h->gth_lock);
0543 output->active = false;
0544
0545 for_each_set_bit(master, gth->output[output->port].master,
0546 TH_CONFIGURABLE_MASTERS + 1) {
0547 gth_master_set(gth, master, -1);
0548 }
0549 spin_unlock(>h->gth_lock);
0550
0551 intel_th_gth_stop(gth, output, true);
0552
0553 reg = ioread32(gth->base + REG_GTH_SCRPD0);
0554 reg &= ~output->scratchpad;
0555 iowrite32(reg, gth->base + REG_GTH_SCRPD0);
0556 }
0557
/*
 * Clear the CTC resync bit in the TSCU control register.
 * NOTE(review): presumably this kicks off a timestamp counter resync
 * in the timestamping unit — confirm against the TSCU documentation.
 */
static void gth_tscu_resync(struct gth_device *gth)
{
	u32 reg;

	reg = ioread32(gth->base + REG_TSCU_TSUCTRL);
	reg &= ~TSUCTRL_CTCRESYNC;
	iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
}
0566
/**
 * intel_th_gth_prepare() - prepare an output port for programming
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * Busy-waits (bounded by GTH_PLE_WAITLOOP_DEPTH) until the output
 * port's reset bit (bit 5 of its config byte) is set.
 */
static void intel_th_gth_prepare(struct intel_th_device *thdev,
				 struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int count;

	/*
	 * Wait until the output port is in reset before we start
	 * programming it.
	 */
	for (count = GTH_PLE_WAITLOOP_DEPTH;
	     count && !(gth_output_get(gth, output->port) & BIT(5)); count--)
		cpu_relax();
}
0581
0582
0583
0584
0585
0586
0587
0588
0589
0590 static void intel_th_gth_enable(struct intel_th_device *thdev,
0591 struct intel_th_output *output)
0592 {
0593 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
0594 struct intel_th *th = to_intel_th(thdev);
0595 int master;
0596 u32 scrpd;
0597
0598 spin_lock(>h->gth_lock);
0599 for_each_set_bit(master, gth->output[output->port].master,
0600 TH_CONFIGURABLE_MASTERS + 1) {
0601 gth_master_set(gth, master, output->port);
0602 }
0603
0604 output->active = true;
0605 spin_unlock(>h->gth_lock);
0606
0607 if (INTEL_TH_CAP(th, tscu_enable))
0608 gth_tscu_resync(gth);
0609
0610 scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
0611 scrpd |= output->scratchpad;
0612 iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
0613
0614 intel_th_gth_start(gth, output);
0615 }
0616
0617
0618
0619
0620
0621
0622
0623
0624
/**
 * intel_th_gth_switch() - execute a switch sequence
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * Fires a CTS trigger, waits for it to latch (bounded by
 * CTS_TRIG_WAITLOOP_DEPTH), then stops and restarts tracing to
 * @output to effect the window switch.
 */
static void intel_th_gth_switch(struct intel_th_device *thdev,
				struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	/* trigger */
	iowrite32(0, gth->base + REG_CTS_CTL);
	iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);
	/* wait on trigger status */
	for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
	     count && !(reg & BIT(4)); count--) {
		reg = ioread32(gth->base + REG_CTS_STAT);
		cpu_relax();
	}
	if (!count)
		dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");

	/* De-assert the trigger */
	iowrite32(0, gth->base + REG_CTS_CTL);

	intel_th_gth_stop(gth, output, false);
	intel_th_gth_start(gth, output);
}
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662 static int intel_th_gth_assign(struct intel_th_device *thdev,
0663 struct intel_th_device *othdev)
0664 {
0665 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
0666 int i, id;
0667
0668 if (thdev->host_mode)
0669 return -EBUSY;
0670
0671 if (othdev->type != INTEL_TH_OUTPUT)
0672 return -EINVAL;
0673
0674 for (i = 0, id = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
0675 if (gth->output[i].port_type != othdev->output.type)
0676 continue;
0677
0678 if (othdev->id == -1 || othdev->id == id)
0679 goto found;
0680
0681 id++;
0682 }
0683
0684 return -ENOENT;
0685
0686 found:
0687 spin_lock(>h->gth_lock);
0688 othdev->output.port = i;
0689 othdev->output.active = false;
0690 gth->output[i].output = &othdev->output;
0691 spin_unlock(>h->gth_lock);
0692
0693 return 0;
0694 }
0695
0696
0697
0698
0699
0700
0701 static void intel_th_gth_unassign(struct intel_th_device *thdev,
0702 struct intel_th_device *othdev)
0703 {
0704 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
0705 int port = othdev->output.port;
0706 int master;
0707
0708 if (thdev->host_mode)
0709 return;
0710
0711 spin_lock(>h->gth_lock);
0712 othdev->output.port = -1;
0713 othdev->output.active = false;
0714 gth->output[port].output = NULL;
0715 for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
0716 if (gth->master[master] == port)
0717 gth->master[master] = -1;
0718 spin_unlock(>h->gth_lock);
0719 }
0720
0721 static int
0722 intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
0723 {
0724 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
0725 int port = 0;
0726
0727
0728
0729
0730
0731 if (master > TH_CONFIGURABLE_MASTERS)
0732 master = TH_CONFIGURABLE_MASTERS;
0733
0734 spin_lock(>h->gth_lock);
0735 if (gth->master[master] == -1) {
0736 set_bit(master, gth->output[port].master);
0737 gth->master[master] = port;
0738 }
0739 spin_unlock(>h->gth_lock);
0740
0741 return 0;
0742 }
0743
0744 static int intel_th_gth_probe(struct intel_th_device *thdev)
0745 {
0746 struct device *dev = &thdev->dev;
0747 struct intel_th *th = dev_get_drvdata(dev->parent);
0748 struct gth_device *gth;
0749 struct resource *res;
0750 void __iomem *base;
0751 int i, ret;
0752
0753 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
0754 if (!res)
0755 return -ENODEV;
0756
0757 base = devm_ioremap(dev, res->start, resource_size(res));
0758 if (!base)
0759 return -ENOMEM;
0760
0761 gth = devm_kzalloc(dev, sizeof(*gth), GFP_KERNEL);
0762 if (!gth)
0763 return -ENOMEM;
0764
0765 gth->dev = dev;
0766 gth->base = base;
0767 spin_lock_init(>h->gth_lock);
0768
0769 dev_set_drvdata(dev, gth);
0770
0771
0772
0773
0774
0775
0776
0777 if (thdev->host_mode)
0778 return 0;
0779
0780 ret = intel_th_gth_reset(gth);
0781 if (ret) {
0782 if (ret != -EBUSY)
0783 return ret;
0784
0785 thdev->host_mode = true;
0786
0787 return 0;
0788 }
0789
0790 for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
0791 gth->master[i] = -1;
0792
0793 for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
0794 gth->output[i].gth = gth;
0795 gth->output[i].index = i;
0796 gth->output[i].port_type =
0797 gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
0798 if (gth->output[i].port_type == GTH_NONE)
0799 continue;
0800
0801 ret = intel_th_output_enable(th, gth->output[i].port_type);
0802
0803 if (ret && ret != -ENODEV)
0804 return ret;
0805 }
0806
0807 if (intel_th_output_attributes(gth) ||
0808 intel_th_master_attributes(gth)) {
0809 pr_warn("Can't initialize sysfs attributes\n");
0810
0811 if (gth->output_group.attrs)
0812 sysfs_remove_group(>h->dev->kobj, >h->output_group);
0813 return -ENOMEM;
0814 }
0815
0816 return 0;
0817 }
0818
0819 static void intel_th_gth_remove(struct intel_th_device *thdev)
0820 {
0821 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
0822
0823 sysfs_remove_group(>h->dev->kobj, >h->output_group);
0824 sysfs_remove_group(>h->dev->kobj, >h->master_group);
0825 }
0826
/* GTH subdevice driver: callbacks invoked by the Intel TH core. */
static struct intel_th_driver intel_th_gth_driver = {
	.probe		= intel_th_gth_probe,
	.remove		= intel_th_gth_remove,
	.assign		= intel_th_gth_assign,
	.unassign	= intel_th_gth_unassign,
	.set_output	= intel_th_gth_set_output,
	.prepare	= intel_th_gth_prepare,
	.enable		= intel_th_gth_enable,
	.trig_switch	= intel_th_gth_switch,
	.disable	= intel_th_gth_disable,
	.driver	= {
		.name	= "gth",
		.owner	= THIS_MODULE,
	},
};
0842
/* Register/unregister the driver with the Intel TH driver core. */
module_driver(intel_th_gth_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_ALIAS("intel_th_switch");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Global Trace Hub driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");