0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #ifndef THUNDERBOLT_H_
0012 #define THUNDERBOLT_H_
0013
0014 #include <linux/device.h>
0015 #include <linux/idr.h>
0016 #include <linux/list.h>
0017 #include <linux/mutex.h>
0018 #include <linux/mod_devicetable.h>
0019 #include <linux/pci.h>
0020 #include <linux/uuid.h>
0021 #include <linux/workqueue.h>
0022
/**
 * enum tb_cfg_pkg_type - Thunderbolt control channel packet type
 *
 * Identifies the payload of a packet exchanged over the Thunderbolt
 * control channel. The XDOMAIN types carry host-to-host (XDomain)
 * protocol messages, the ICM types carry messages to/from the firmware
 * connection manager.
 *
 * NOTE(review): exact wire semantics are defined by the Thunderbolt
 * protocol/NHI specification, not visible in this header.
 */
enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *			 Thunderbolt dock (and Display Port)
 * @TB_SECURITY_NOPCIE: Used when PCIe tunneling is disabled from
 *			the BIOS (USB4 systems)
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
	TB_SECURITY_NOPCIE,
};
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data (flexible array,
 *	      sized by the connection manager at allocation time)
 */
struct tb {
	struct device dev;
	struct mutex lock;
	struct tb_nhi *nhi;
	struct tb_ctl *ctl;
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	int index;
	enum tb_security_level security_level;
	size_t nboot_acl;
	unsigned long privdata[];
};
0088
/* Bus and device types registered by the Thunderbolt core */
extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;
0092
#define TB_LINKS_PER_PHY_PORT	2

/**
 * tb_phy_port_from_link() - Map a 1-based link number to its physical port
 * @link: 1-based link number
 *
 * Each physical port carries %TB_LINKS_PER_PHY_PORT links, so links 1..2
 * map to port 0, links 3..4 to port 1, and so on.
 */
static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	unsigned int zero_based_link = link - 1;

	return zero_based_link / TB_LINKS_PER_PHY_PORT;
}
0099
0100
0101
0102
0103
0104
0105
0106
/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};
0111
/**
 * enum tb_property_type - Type of a property
 * @TB_PROPERTY_TYPE_UNKNOWN: Format of the property is unknown
 * @TB_PROPERTY_TYPE_DIRECTORY: Property contains a child directory
 * @TB_PROPERTY_TYPE_DATA: Property contains arbitrary data
 * @TB_PROPERTY_TYPE_TEXT: Property contains text
 * @TB_PROPERTY_TYPE_VALUE: Property contains an immediate 32-bit value
 *
 * The numeric values appear to be the ASCII codes of 'D', 'd', 't' and
 * 'v' (0x44, 0x64, 0x74, 0x76) used in the on-wire property block
 * encoding -- confirm against the parse/format implementation.
 */
enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};
0119
/* Maximum length of a property key, excluding the NUL terminator */
#define TB_PROPERTY_KEY_SIZE	8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key of the property (always NUL terminated, hence the +1)
 * @type: Type of the property
 * @length: Length of the property payload
 * @value: Property value, interpretation selected by @type
 * @value.dir: Child directory (%TB_PROPERTY_TYPE_DIRECTORY)
 * @value.data: Raw data (%TB_PROPERTY_TYPE_DATA)
 * @value.text: Text (%TB_PROPERTY_TYPE_TEXT)
 * @value.immediate: Immediate 32-bit value (%TB_PROPERTY_TYPE_VALUE)
 *
 * NOTE(review): whether @length is in bytes or dwords is not evident
 * from this header -- confirm against the property implementation.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};
0144
/* Parse an on-wire property block into a directory tree */
struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
/* Format a directory tree back into an on-wire property block */
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
/* Deep-copy a property directory */
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
/* Create an empty directory with the given UUID */
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
/* Free a directory and everything it contains */
void tb_property_free_dir(struct tb_property_dir *dir);
/* Add properties of the various types under @parent */
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
/* Remove a property from its directory (does not free child data?) --
 * NOTE(review): ownership semantics live in the implementation; confirm. */
void tb_property_remove(struct tb_property *tb_property);
/* Find a property by key and type in @dir */
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key, enum tb_property_type type);
/* Return the property following @prev (or the first one when @prev is NULL) */
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

/**
 * tb_property_for_each - Iterate over all properties in a directory
 * @dir: Directory to iterate
 * @property: &struct tb_property cursor, set on each iteration
 */
#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

/* Register/unregister a properties directory exposed to remote hosts */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote host
 * @local_uuid: Cached local UUID
 * @route: Route string through which the remote domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @local_max_hopid: Maximum input HopID of this host
 * @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock serializing access to the fields it protects --
 *	  NOTE(review): exact coverage is defined in xdomain.c; confirm
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link
 * @link_width: Width of the link
 * @is_unplugged: The XDomain is unplugged
 * @needs_uuid: The remote UUID still needs to be queried
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Allocated input HopIDs
 * @out_hopids: Allocated output HopIDs
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of @local_property_block (in dwords,
 *			      presumably -- confirm)
 * @remote_properties: Properties exported by the remote domain
 * @remote_property_block_gen: Generation of @remote_properties
 * @state: Current state of the discovery state machine
 * @state_work: Work used to run the next state
 * @state_retries: Number of retries remaining for the state
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Retries left for the properties changed
 *				notification
 * @bonding_possible: True if lane bonding is possible
 * @target_link_width: Target link width
 * @link: Root switch link the remote domain is connected to
 * @depth: Depth in the chain of the remote domain
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	unsigned int local_max_hopid;
	unsigned int remote_max_hopid;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool is_unplugged;
	bool needs_uuid;
	struct ida service_ids;
	struct ida in_hopids;
	struct ida out_hopids;
	u32 *local_property_block;
	u32 local_property_block_gen;
	u32 local_property_block_len;
	struct tb_property_dir *remote_properties;
	u32 remote_property_block_gen;
	int state;
	struct delayed_work state_work;
	int state_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	bool bonding_possible;
	u8 target_link_width;
	u8 link;
	u8 depth;
};
0257
/* Enable/disable lane bonding on the XDomain link */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
/* Allocate/release input and output HopIDs for DMA tunneling */
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
/* Enable/disable DMA paths described by the given path/ring numbers */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring);
0270
/**
 * tb_xdomain_disable_all_paths() - Disable all DMA paths of an XDomain
 * @xd: XDomain connection
 *
 * Convenience wrapper that passes -1 for every path/ring argument.
 * NOTE(review): the wildcard meaning of -1 is implemented by
 * tb_xdomain_disable_paths(), not visible here -- confirm there.
 */
static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
	const int any = -1;

	return tb_xdomain_disable_paths(xd, any, any, any, any);
}
0275
/* Look up an XDomain by UUID/route; caller must hold tb->lock */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
0278
0279 static inline struct tb_xdomain *
0280 tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
0281 {
0282 struct tb_xdomain *xd;
0283
0284 mutex_lock(&tb->lock);
0285 xd = tb_xdomain_find_by_uuid(tb, uuid);
0286 mutex_unlock(&tb->lock);
0287
0288 return xd;
0289 }
0290
0291 static inline struct tb_xdomain *
0292 tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
0293 {
0294 struct tb_xdomain *xd;
0295
0296 mutex_lock(&tb->lock);
0297 xd = tb_xdomain_find_by_route(tb, route);
0298 mutex_unlock(&tb->lock);
0299
0300 return xd;
0301 }
0302
0303 static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
0304 {
0305 if (xd)
0306 get_device(&xd->dev);
0307 return xd;
0308 }
0309
0310 static inline void tb_xdomain_put(struct tb_xdomain *xd)
0311 {
0312 if (xd)
0313 put_device(&xd->dev);
0314 }
0315
0316 static inline bool tb_is_xdomain(const struct device *dev)
0317 {
0318 return dev->type == &tb_xdomain_type;
0319 }
0320
0321 static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
0322 {
0323 if (tb_is_xdomain(dev))
0324 return container_of(dev, struct tb_xdomain, dev);
0325 return NULL;
0326 }
0327
/* Send a response packet over the XDomain control channel */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
/* Send a request and wait up to @timeout_msec for the matching response */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message --
 *	      NOTE(review): the return-value convention (whether non-zero
 *	      stops further dispatch) lives in the implementation; confirm
 * @data: Data passed to @callback
 * @list: Handlers are linked using this
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain service device
 * @id: ID of the service
 * @key: Protocol key from the properties directory of the device
 * @prtcid: Protocol ID from the properties directory of the device
 * @prtcvers: Protocol version from the properties directory of the device
 * @prtcrevs: Protocol software revision from the properties directory of
 *	      the device
 * @prtcstns: Protocol settings mask from the properties directory of the
 *	      device
 * @debugfs_dir: Pointer to the service debugfs directory; service drivers
 *		 can add their own entries under it
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
	struct dentry *debugfs_dir;
};
0391
0392 static inline struct tb_service *tb_service_get(struct tb_service *svc)
0393 {
0394 if (svc)
0395 get_device(&svc->dev);
0396 return svc;
0397 }
0398
0399 static inline void tb_service_put(struct tb_service *svc)
0400 {
0401 if (svc)
0402 put_device(&svc->dev);
0403 }
0404
0405 static inline bool tb_is_service(const struct device *dev)
0406 {
0407 return dev->type == &tb_service_type;
0408 }
0409
0410 static inline struct tb_service *tb_to_service(struct device *dev)
0411 {
0412 if (tb_is_service(dev))
0413 return container_of(dev, struct tb_service, dev);
0414 return NULL;
0415 }
0416
0417
0418
0419
0420
0421
0422
0423
0424
/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when a matching service is found
 * @remove: Called when the service is removed
 * @shutdown: Called at shutdown time to stop the service
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};

/**
 * TB_SERVICE - Initializer for a &struct tb_service_id entry matching
 *		on protocol key and protocol ID
 * @key: Protocol key to match
 * @id: Protocol ID to match
 */
#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
0441
0442 static inline void *tb_service_get_drvdata(const struct tb_service *svc)
0443 {
0444 return dev_get_drvdata(&svc->dev);
0445 }
0446
0447 static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
0448 {
0449 dev_set_drvdata(&svc->dev, data);
0450 }
0451
0452 static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
0453 {
0454 return tb_to_xdomain(svc->dev.parent);
0455 }
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
/**
 * struct tb_nhi - Thunderbolt native host interface
 * @lock: Serializes NHI state -- NOTE(review): exact coverage (ring
 *	  creation/destruction ordering) is defined in nhi.c; confirm
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear
 * @iommu_dma_protection: An IOMMU isolates external-facing ports
 * @interrupt_work: Work used to handle ring interrupts
 * @hop_count: Number of rings (end point hops) supported by NHI
 * @quirks: NHI specific quirks if any
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	const struct tb_nhi_ops *ops;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	bool iommu_dma_protection;
	struct work_struct interrupt_work;
	u32 hop_count;
	unsigned long quirks;
};
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
/**
 * struct tb_ring - Thunderbolt Tx or Rx ring associated with an NHI
 * @lock: Lock serializing actions to this ring
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X
 * @vector: MSI-X vector number the ring uses
 * @flags: Ring specific flags (RING_FLAG_*)
 * @e2e_tx_hop: Transmit HopID when end-to-end flow control is enabled --
 *		NOTE(review): Rx-only semantics presumed; confirm in nhi.c
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start polling
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	int e2e_tx_hop;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};
0539
0540
/* Leave ring enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Treat ring as frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
/* Per-frame completion callback; @canceled presumably set when the frame
 * was not transferred (e.g. the ring was stopped) -- confirm in nhi.c */
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
/**
 * enum ring_desc_flags - Flags for DMA ring descriptors
 *
 * Note that some values intentionally alias (0x1 and 0x4 each appear
 * twice): the same descriptor bit is interpreted differently depending
 * on ring direction (Tx vs Rx) -- confirm against the NHI registers.
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,			/* Tx only */
	RING_DESC_CRC_ERROR = 0x1,		/* Rx only */
	RING_DESC_COMPLETED = 0x2,		/* set by NHI */
	RING_DESC_POSTED = 0x4,			/* always set this */
	RING_DESC_BUFFER_OVERRUN = 0x04,	/* Rx only */
	RING_DESC_INTERRUPT = 0x8,		/* request an interrupt on completion */
};
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
/**
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame buffer
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a ring queue using this
 * @size: Size of the frame in bytes (12-bit field)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field (PDF)
 * @sof: Start of frame protocol defined field (PDF)
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

/* Frame buffer size -- presumably the minimum buffer size for Rx frames;
 * confirm against the ring implementation */
#define TB_FRAME_SIZE		0x100
0590
/* Allocate a Tx ring on the given hop */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
/* Allocate an Rx ring; @start_poll switches the ring to polling mode */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

/* Internal enqueue; use tb_ring_rx()/tb_ring_tx() wrappers instead */
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619 static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
0620 {
0621 WARN_ON(ring->is_tx);
0622 return __tb_ring_enqueue(ring, frame);
0623 }
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640 static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
0641 {
0642 WARN_ON(!ring->is_tx);
0643 return __tb_ring_enqueue(ring, frame);
0644 }
0645
0646
/* Poll a completed frame from the ring / re-enable ring interrupt after
 * polling is done */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);
0649
0650
0651
0652
0653
0654
0655
0656
0657 static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
0658 {
0659 return &ring->nhi->pdev->dev;
0660 }
0661
0662 #endif