#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * If dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code.
 */
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE 1

static inline int dma_submit_error(dma_cookie_t cookie)
{
        return cookie < 0 ? cookie : 0;
}

/**
 * enum dma_status - DMA transaction status
 * @DMA_COMPLETE: transaction completed
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_PAUSED: transaction is paused
 * @DMA_ERROR: transaction failed
 * @DMA_OUT_OF_ORDER: transaction is completed, but completion ordering with
 *	respect to other transactions is not guaranteed (channels advertising
 *	DMA_COMPLETION_NO_ORDER)
 */
enum dma_status {
        DMA_COMPLETE,
        DMA_IN_PROGRESS,
        DMA_PAUSED,
        DMA_ERROR,
        DMA_OUT_OF_ORDER,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 *
 * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
 * automatically set as dma devices are registered.
 */
enum dma_transaction_type {
        DMA_MEMCPY,
        DMA_XOR,
        DMA_PQ,
        DMA_XOR_VAL,
        DMA_PQ_VAL,
        DMA_MEMSET,
        DMA_MEMSET_SG,
        DMA_INTERRUPT,
        DMA_PRIVATE,
        DMA_ASYNC_TX,
        DMA_SLAVE,
        DMA_CYCLIC,
        DMA_INTERLEAVE,
        DMA_COMPLETION_NO_ORDER,
        DMA_REPEAT,
        DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
        DMA_TX_TYPE_END,
};

/**
 * enum dma_transfer_direction - dma transfer mode and direction indicator
 * @DMA_MEM_TO_MEM: Async/Memcpy mode
 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
 */
enum dma_transfer_direction {
        DMA_MEM_TO_MEM,
        DMA_MEM_TO_DEV,
        DMA_DEV_TO_MEM,
        DMA_DEV_TO_DEV,
        DMA_TRANS_NONE,
};

/*
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is a contiguously transferred block of data.  Between two chunks
 * the transfer skips the inter-chunk gap (ICG).  A frame is 'frame_size'
 * chunks, and a complete transfer is 'numf' frames.
 */

/**
 * struct data_chunk - element of the scatter-gather list that makes a frame
 * @size: number of bytes to read from the source
 * @icg: number of bytes to jump after the last address of this chunk and
 *	before the first address of the next chunk; ignored on a side that is
 *	read or written contiguously
 * @dst_icg: destination ICG override, used instead of @icg when non-zero
 * @src_icg: source ICG override, used instead of @icg when non-zero
 */
struct data_chunk {
        size_t size;
        size_t icg;
        size_t dst_icg;
        size_t src_icg;
};

/**
 * struct dma_interleaved_template - template to convey the transfer pattern
 *	and attributes to the DMA controller
 * @src_start: bus address of the source for the first chunk
 * @dst_start: bus address of the destination for the first chunk
 * @dir: type of source and destination (see enum dma_transfer_direction)
 * @src_inc: true if the source address increments after reading each chunk
 * @dst_inc: true if the destination address increments after writing each chunk
 * @src_sgl: true if the ICG of sgl[] applies to the source (scattered read);
 *	ignored if @src_inc is false
 * @dst_sgl: true if the ICG of sgl[] applies to the destination (scattered
 *	write); ignored if @dst_inc is false
 * @numf: number of frames in this template
 * @frame_size: number of chunks in a frame, i.e. the size of sgl[]
 * @sgl: array of {chunk, icg} pairs that make up a frame
 */
struct dma_interleaved_template {
        dma_addr_t src_start;
        dma_addr_t dst_start;
        enum dma_transfer_direction dir;
        bool src_inc;
        bool dst_inc;
        bool src_sgl;
        bool dst_sgl;
        size_t numf;
        size_t frame_size;
        struct data_chunk sgl[];
};
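
/*
 * Example (illustrative sketch, not part of the original header): describing
 * a 2D copy of 'height' rows of 'width' bytes from a buffer whose lines are
 * 'stride' bytes apart.  'chan', 'src', 'dst', 'width', 'height' and 'stride'
 * are assumptions made up for the sketch.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_start = src;		// dma_addr_t of the first line
 *	xt->dst_start = dst;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = true;		// honour the ICG on the source side
 *	xt->dst_sgl = false;		// write the destination contiguously
 *	xt->numf = height;		// one frame per line
 *	xt->frame_size = 1;		// a frame is a single chunk here
 *	xt->sgl[0].size = width;
 *	xt->sgl[0].icg = stride - width;
 *
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	kfree(xt);
 *
 * Whether the template may be freed right after the prep call depends on the
 * driver; treat the lifetime shown above as an assumption.
 */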

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK: if clear, the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has a chance to establish any dependency
 *	chains
 * @DMA_PREP_PQ_DISABLE_P: prevent generation of P while generating Q
 * @DMA_PREP_PQ_DISABLE_Q: prevent generation of Q while generating P
 * @DMA_PREP_CONTINUE: indicate to a driver that it is reusing buffers as
 *	sources that were the result of a previous operation, in the case of
 *	a PQ operation it continues the calculation with new sources
 * @DMA_PREP_FENCE: tell the driver that subsequent operations depend on the
 *	result of this operation
 * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
 *	cleared or freed
 * @DMA_PREP_CMD: tell the driver that the data passed to the DMA API is
 *	command data and the descriptor cannot be used for a normal data
 *	transfer
 * @DMA_PREP_REPEAT: the transaction is automatically repeated when it ends
 *	until a new transaction prepared with DMA_PREP_LOAD_EOT takes over;
 *	requires the DMA_REPEAT capability
 * @DMA_PREP_LOAD_EOT: the transaction replaces the currently repeated one at
 *	the next end of transfer; requires the DMA_LOAD_EOT capability
 */
enum dma_ctrl_flags {
        DMA_PREP_INTERRUPT = (1 << 0),
        DMA_CTRL_ACK = (1 << 1),
        DMA_PREP_PQ_DISABLE_P = (1 << 2),
        DMA_PREP_PQ_DISABLE_Q = (1 << 3),
        DMA_PREP_CONTINUE = (1 << 4),
        DMA_PREP_FENCE = (1 << 5),
        DMA_CTRL_REUSE = (1 << 6),
        DMA_PREP_CMD = (1 << 7),
        DMA_PREP_REPEAT = (1 << 8),
        DMA_PREP_LOAD_EOT = (1 << 9),
};

/**
 * enum sum_check_bits - bit position of pq_check_flags
 */
enum sum_check_bits {
        SUM_CHECK_P = 0,
        SUM_CHECK_Q = 1,
};

/**
 * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
 * @SUM_CHECK_P_RESULT: 1 if xor zero sum error, 0 otherwise
 * @SUM_CHECK_Q_RESULT: 1 if reed-solomon zero sum error, 0 otherwise
 */
enum sum_check_flags {
        SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
        SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
 * @DESC_METADATA_NONE: the channel does not support descriptor metadata
 * @DESC_METADATA_CLIENT: the metadata buffer is allocated and provided by the
 *	client and attached to a descriptor with
 *	dmaengine_desc_attach_metadata()
 * @DESC_METADATA_ENGINE: the metadata buffer is allocated and managed by the
 *	DMA driver; the client obtains a pointer to it with
 *	dmaengine_desc_get_metadata_ptr() and, for memory-to-device transfers,
 *	commits the payload length with dmaengine_desc_set_metadata_len()
 */
enum dma_desc_metadata_mode {
        DESC_METADATA_NONE = 0,
        DESC_METADATA_CLIENT = BIT(0),
        DESC_METADATA_ENGINE = BIT(1),
};

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
        /* stats */
        unsigned long memcpy_count;
        unsigned long bytes_transferred;
};

/**
 * struct dma_router - DMA router structure
 * @dev: pointer to the DMA router device
 * @route_free: function to be called when the route can be disconnected
 */
struct dma_router {
        struct device *dev;
        void (*route_free)(struct device *dev, void *route_data);
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @slave: ptr to the device using this channel
 * @cookie: last cookie value returned to client
 * @completed_cookie: last completed cookie for this channel
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @name: backlink name for sysfs
 * @dbg_client_name: slave name for debugfs
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client_count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @router: pointer to the DMA router structure
 * @route_data: channel specific data for the router
 * @private: private data for certain client-channel associations
 */
struct dma_chan {
        struct dma_device *device;
        struct device *slave;
        dma_cookie_t cookie;
        dma_cookie_t completed_cookie;

        /* sysfs */
        int chan_id;
        struct dma_chan_dev *dev;
        const char *name;
#ifdef CONFIG_DEBUG_FS
        char *dbg_client_name;
#endif

        struct list_head device_node;
        struct dma_chan_percpu __percpu *local;
        int client_count;
        int table_count;

        /* DMA router */
        struct dma_router *router;
        void *route_data;

        void *private;
};

/**
 * struct dma_chan_dev - relate sysfs device node to backing channel device
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @chan_dma_dev: the channel is using custom/different dma-mapping
 *	compared to the parent dma_device
 */
struct dma_chan_dev {
        struct dma_chan *chan;
        struct device device;
        int dev_id;
        bool chan_dma_dev;
};

/**
 * enum dma_slave_buswidth - defines bus width of the DMA slave
 * device, source or target buses
 */
enum dma_slave_buswidth {
        DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
        DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
        DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
        DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
        DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
        DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
        DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
        DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
        DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
        DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};

/**
 * struct dma_slave_config - dma slave channel runtime config
 * @direction: whether the data goes in or out on this slave channel
 *	(DMA_MEM_TO_DEV or DMA_DEV_TO_MEM)
 * @src_addr: physical address where DMA slave data should be read (RX)
 * @dst_addr: physical address where DMA slave data should be written (TX)
 * @src_addr_width: width in bytes of the source register where read
 *	operations are performed
 * @dst_addr_width: same as @src_addr_width but for destination writes
 * @src_maxburst: maximum number of words (in units of @src_addr_width, not
 *	bytes) that can be read from the device in a single burst
 * @dst_maxburst: same as @src_maxburst but for destination writes
 * @src_port_window_size: size of the address window (in words) of the source
 *	FIFO/register area, 0 or 1 when not used
 * @dst_port_window_size: same as @src_port_window_size but for the
 *	destination
 * @device_fc: flow controller setting; true when the peripheral is the flow
 *	controller, false when the DMA engine is
 * @peripheral_config: peripheral-specific configuration passed through to the
 *	DMA driver
 * @peripheral_size: size of the @peripheral_config blob in bytes
 */
struct dma_slave_config {
        enum dma_transfer_direction direction;
        phys_addr_t src_addr;
        phys_addr_t dst_addr;
        enum dma_slave_buswidth src_addr_width;
        enum dma_slave_buswidth dst_addr_width;
        u32 src_maxburst;
        u32 dst_maxburst;
        u32 src_port_window_size;
        u32 dst_port_window_size;
        bool device_fc;
        void *peripheral_config;
        size_t peripheral_size;
};
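
/*
 * Example (illustrative sketch, not part of the original header): minimal
 * runtime configuration of a TX (memory to device) slave channel.  'chan'
 * and 'fifo_phys' are assumptions made up for the sketch.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,			// device FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,			// 8 words per burst
 *		.device_fc = false,
 *	};
 *	int ret = dmaengine_slave_config(chan, &cfg);
 *	if (ret)
 *		return ret;
 */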

/**
 * enum dma_residue_granularity - granularity of the reported transfer residue
 * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: residue reporting is not supported;
 *	the residue is only valid once a descriptor has completed, i.e. it is
 *	always 0 for the active descriptor
 * @DMA_RESIDUE_GRANULARITY_SEGMENT: residue is updated after each
 *	successfully completed segment of the transfer (e.g. a scatter-gather
 *	element)
 * @DMA_RESIDUE_GRANULARITY_BURST: residue is updated after each transferred
 *	burst
 */
enum dma_residue_granularity {
        DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
        DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
        DMA_RESIDUE_GRANULARITY_BURST = 2,
};

/**
 * struct dma_slave_caps - expose capabilities of a slave channel only
 * @src_addr_widths: bit mask of src addr widths the channel supports
 * @dst_addr_widths: bit mask of dst addr widths the channel supports
 * @directions: bit mask of slave directions the channel supports
 * @min_burst: min burst capability per-transfer
 * @max_burst: max burst capability per-transfer
 * @max_sg_burst: max number of SG list entries executed in a single burst
 *	DMA transaction with no software intervention for reinitialization;
 *	zero means an unlimited number of entries
 * @cmd_pause: true if pause is supported (i.e. for reading residue or for
 *	resuming later)
 * @cmd_resume: true if resume is supported
 * @cmd_terminate: true if the terminate command is supported
 * @residue_granularity: granularity of the reported transfer residue
 * @descriptor_reuse: true if a descriptor can be reused by the client and
 *	resubmitted multiple times
 */
struct dma_slave_caps {
        u32 src_addr_widths;
        u32 dst_addr_widths;
        u32 directions;
        u32 min_burst;
        u32 max_burst;
        u32 max_sg_burst;
        bool cmd_pause;
        bool cmd_resume;
        bool cmd_terminate;
        enum dma_residue_granularity residue_granularity;
        bool descriptor_reuse;
};

static inline const char *dma_chan_name(struct dma_chan *chan)
{
        return dev_name(&chan->dev->device);
}

void dma_chan_cleanup(struct kref *kref);

/**
 * typedef dma_filter_fn - callback filter for dma_request_channel
 * @chan: channel to be reviewed
 * @filter_param: opaque parameter passed through dma_request_channel
 *
 * When this optional parameter is specified in a call to dma_request_channel
 * a suitable channel is passed to this routine for further dispositioning
 * before being returned.  Where 'suitable' indicates a non-NULL channel that
 * has a capability match with the request.
 */
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
        DMA_TRANS_NOERROR = 0,		/* SUCCESS */
        DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
        DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
        DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
        enum dmaengine_tx_result result;
        u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
                                const struct dmaengine_result *result);

struct dmaengine_unmap_data {
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        u16 map_cnt;
#else
        u8 map_cnt;
#endif
        u8 to_cnt;
        u8 from_cnt;
        u8 bidi_cnt;
        struct device *dev;
        struct kref kref;
        size_t len;
        dma_addr_t addr[];
};

struct dma_async_tx_descriptor;

struct dma_descriptor_metadata_ops {
        int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
                      size_t len);

        void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
                         size_t *payload_len, size_t *max_len);
        int (*set_len)(struct dma_async_tx_descriptor *desc,
                       size_t payload_len);
};

/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
 *	descriptor pending; to be pushed on the .issue_pending() call
 * @desc_free: driver routine to free a reusable descriptor
 * @callback: routine to call after this operation is complete
 * @callback_result: routine to call after this operation is complete that
 *	also receives the transaction result
 * @callback_param: general parameter to pass to the callback routine
 * @unmap: hook for generic DMA unmap data
 * @desc_metadata_mode: core managed metadata mode to protect mixed use of
 *	DESC_METADATA_CLIENT or DESC_METADATA_ENGINE; otherwise
 *	DESC_METADATA_NONE
 * @metadata_ops: DMA driver provided metadata ops, set by the DMA driver if
 *	metadata mode is supported by the descriptor
 * @next: (async_tx) at completion submit this descriptor
 * @parent: (async_tx) pointer to the next level up in the dependency chain
 * @lock: (async_tx) protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
        dma_cookie_t cookie;
        enum dma_ctrl_flags flags;
        dma_addr_t phys;
        struct dma_chan *chan;
        dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
        int (*desc_free)(struct dma_async_tx_descriptor *tx);
        dma_async_tx_callback callback;
        dma_async_tx_callback_result callback_result;
        void *callback_param;
        struct dmaengine_unmap_data *unmap;
        enum dma_desc_metadata_mode desc_metadata_mode;
        struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        struct dma_async_tx_descriptor *next;
        struct dma_async_tx_descriptor *parent;
        spinlock_t lock;
#endif
};

#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
                                 struct dmaengine_unmap_data *unmap)
{
        kref_get(&unmap->kref);
        tx->unmap = unmap;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
                                 struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
        return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
        if (!tx->unmap)
                return;

        dmaengine_unmap_put(tx->unmap);
        tx->unmap = NULL;
}

#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
        BUG();
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
        return NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
        return NULL;
}

#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
        spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
        spin_unlock_bh(&txd->lock);
}
static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
        txd->next = next;
        next->parent = txd;
}
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
        txd->parent = NULL;
}
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
        txd->next = NULL;
}
static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
        return txd->parent;
}
static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
        return txd->next;
}
#endif

/**
 * struct dma_tx_state - filled in to report the status of a transfer
 * @last: last completed DMA cookie
 * @used: last issued DMA cookie (i.e. the one in progress)
 * @residue: the remaining number of bytes left to transmit on the selected
 *	transfer for states DMA_IN_PROGRESS and DMA_PAUSED if this is
 *	implemented in the driver, else 0
 * @in_flight_bytes: amount of data in bytes cached by the DMA
 */
struct dma_tx_state {
        dma_cookie_t last;
        dma_cookie_t used;
        u32 residue;
        u32 in_flight_bytes;
};

/**
 * enum dmaengine_alignment - defines alignment of the DMA async tx
 * buffers
 */
enum dmaengine_alignment {
        DMAENGINE_ALIGN_1_BYTE = 0,
        DMAENGINE_ALIGN_2_BYTES = 1,
        DMAENGINE_ALIGN_4_BYTES = 2,
        DMAENGINE_ALIGN_8_BYTES = 3,
        DMAENGINE_ALIGN_16_BYTES = 4,
        DMAENGINE_ALIGN_32_BYTES = 5,
        DMAENGINE_ALIGN_64_BYTES = 6,
        DMAENGINE_ALIGN_128_BYTES = 7,
        DMAENGINE_ALIGN_256_BYTES = 8,
};

/**
 * struct dma_slave_map - associates slave device and its slave channel with
 * parameter to be used by a filter function
 * @devname: name of the device
 * @slave: slave channel name
 * @param: opaque parameter to pass to struct dma_filter.fn
 */
struct dma_slave_map {
        const char *devname;
        const char *slave;
        void *param;
};

/**
 * struct dma_filter - information for slave device/channel to filter_fn/param
 * mapping
 * @fn: filter function callback
 * @mapcnt: number of slave device/channel in the map
 * @map: array of channel to filter mapping data
 */
struct dma_filter {
        dma_filter_fn fn;
        int mapcnt;
        const struct dma_slave_map *map;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 *
 * The first group of members describes the device itself: reference count,
 * channel lists, capability mask (@cap_mask), supported metadata modes,
 * xor/pq source limits, operand alignment requirements, the backing
 * struct device, the owning module and the channel ID allocator.
 *
 * @src_addr_widths, @dst_addr_widths, @directions, @min_burst, @max_burst,
 * @max_sg_burst, @descriptor_reuse and @residue_granularity advertise the
 * generic slave capabilities reported through dma_get_slave_caps(); a
 * channel-specific @device_caps callback may override them.
 *
 * The remaining members are driver callbacks: channel resource allocation
 * and release, the device_prep_*() descriptor preparation hooks for each
 * transaction type, runtime control (@device_config, @device_pause,
 * @device_resume, @device_terminate_all, @device_synchronize), status
 * reporting (@device_tx_status), @device_issue_pending to push submitted
 * work to hardware, @device_release and the debugfs hooks.
 */
struct dma_device {
        struct kref ref;
        unsigned int chancnt;
        unsigned int privatecnt;
        struct list_head channels;
        struct list_head global_node;
        struct dma_filter filter;
        dma_cap_mask_t cap_mask;
        enum dma_desc_metadata_mode desc_metadata_modes;
        unsigned short max_xor;
        unsigned short max_pq;
        enum dmaengine_alignment copy_align;
        enum dmaengine_alignment xor_align;
        enum dmaengine_alignment pq_align;
        enum dmaengine_alignment fill_align;
        #define DMA_HAS_PQ_CONTINUE (1 << 15)

        int dev_id;
        struct device *dev;
        struct module *owner;
        struct ida chan_ida;

        u32 src_addr_widths;
        u32 dst_addr_widths;
        u32 directions;
        u32 min_burst;
        u32 max_burst;
        u32 max_sg_burst;
        bool descriptor_reuse;
        enum dma_residue_granularity residue_granularity;

        int (*device_alloc_chan_resources)(struct dma_chan *chan);
        int (*device_router_config)(struct dma_chan *chan);
        void (*device_free_chan_resources)(struct dma_chan *chan);

        struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
                struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                size_t len, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
                struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
                unsigned int src_cnt, size_t len, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
                struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
                size_t len, enum sum_check_flags *result, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
                struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
                unsigned int src_cnt, const unsigned char *scf,
                size_t len, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
                struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                unsigned int src_cnt, const unsigned char *scf, size_t len,
                enum sum_check_flags *pqres, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
                struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
                unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
                struct dma_chan *chan, struct scatterlist *sg,
                unsigned int nents, int value, unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
                struct dma_chan *chan, unsigned long flags);

        struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context);
        struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
                struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
                struct dma_chan *chan, struct dma_interleaved_template *xt,
                unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
                struct dma_chan *chan, dma_addr_t dst, u64 data,
                unsigned long flags);

        void (*device_caps)(struct dma_chan *chan,
                            struct dma_slave_caps *caps);
        int (*device_config)(struct dma_chan *chan,
                             struct dma_slave_config *config);
        int (*device_pause)(struct dma_chan *chan);
        int (*device_resume)(struct dma_chan *chan);
        int (*device_terminate_all)(struct dma_chan *chan);
        void (*device_synchronize)(struct dma_chan *chan);

        enum dma_status (*device_tx_status)(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate);
        void (*device_issue_pending)(struct dma_chan *chan);
        void (*device_release)(struct dma_device *dev);
        /* debugfs support */
        void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
        struct dentry *dbg_dev_root;
};

static inline int dmaengine_slave_config(struct dma_chan *chan,
                                         struct dma_slave_config *config)
{
        if (chan->device->device_config)
                return chan->device->device_config(chan, config);

        return -ENOSYS;
}

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
        return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
        struct dma_chan *chan, dma_addr_t buf, size_t len,
        enum dma_transfer_direction dir, unsigned long flags)
{
        struct scatterlist sg;
        sg_init_table(&sg, 1);
        sg_dma_address(&sg) = buf;
        sg_dma_len(&sg) = len;

        if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
                return NULL;

        return chan->device->device_prep_slave_sg(chan, &sg, 1,
                                                  dir, flags, NULL);
}
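
/*
 * Example (illustrative sketch, not part of the original header): typical
 * slave transmit flow once a channel has been obtained with
 * dma_request_chan() and configured with dmaengine_slave_config().  'chan',
 * 'dma_buf', 'len', 'my_done_cb' and 'my_ctx' are assumptions made up for
 * the sketch.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EINVAL;
 *
 *	desc->callback = my_done_cb;		// invoked from the driver's
 *	desc->callback_param = my_ctx;		// completion context
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *
 *	dma_async_issue_pending(chan);		// nothing starts before this
 */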

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction dir, unsigned long flags)
{
        if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
                return NULL;

        return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                  dir, flags, NULL);
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction dir, unsigned long flags,
        struct rio_dma_ext *rio_ext)
{
        if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
                return NULL;

        return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                  dir, flags, rio_ext);
}
#endif

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir,
        unsigned long flags)
{
        if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
                return NULL;

        return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
                                                    period_len, dir, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
        struct dma_chan *chan, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
                return NULL;
        if (flags & DMA_PREP_REPEAT &&
            !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
                return NULL;

        return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}

/**
 * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
 * @chan: The channel to be used for this descriptor
 * @dest: Address of buffer to be set
 * @value: Treated as a single byte value that fills the destination buffer
 * @len: The total size of dest
 * @flags: DMA engine flags
 */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
        struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
        unsigned long flags)
{
        if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
                return NULL;

        return chan->device->device_prep_dma_memset(chan, dest, value,
                                                    len, flags);
}

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long flags)
{
        if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
                return NULL;

        return chan->device->device_prep_dma_memcpy(chan, dest, src,
                                                    len, flags);
}
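
/*
 * Example (illustrative sketch, not part of the original header): offloading
 * a copy between two already-mapped DMA addresses on a DMA_MEMCPY capable
 * channel and waiting for it synchronously.  'chan', 'dst', 'src' and 'len'
 * are assumptions made up for the sketch; dst/src must be dma_addr_t values
 * obtained from the DMA mapping API.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EINVAL;
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *
 *	dma_async_issue_pending(chan);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */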

static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
                                                enum dma_desc_metadata_mode mode)
{
        if (!chan)
                return false;

        return !!(chan->device->desc_metadata_modes & mode);
}

#ifdef CONFIG_DMA_ENGINE
int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
                                   void *data, size_t len);
void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
                                      size_t *payload_len, size_t *max_len);
int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
                                    size_t payload_len);
#else
static inline int dmaengine_desc_attach_metadata(
                struct dma_async_tx_descriptor *desc, void *data, size_t len)
{
        return -EINVAL;
}
static inline void *dmaengine_desc_get_metadata_ptr(
                struct dma_async_tx_descriptor *desc, size_t *payload_len,
                size_t *max_len)
{
        return NULL;
}
static inline int dmaengine_desc_set_metadata_len(
                struct dma_async_tx_descriptor *desc, size_t payload_len)
{
        return -EINVAL;
}
#endif

/**
 * dmaengine_terminate_all() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * This function is DEPRECATED, use either dmaengine_terminate_sync() or
 * dmaengine_terminate_async() instead.
 */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
        if (chan->device->device_terminate_all)
                return chan->device->device_terminate_all(chan);

        return -ENOSYS;
}

/**
 * dmaengine_terminate_async() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending descriptors
 * that have previously been submitted to the channel. It is not guaranteed
 * though that the transfer for the active descriptor has stopped when the
 * function returns. Furthermore it is possible the complete callback of a
 * submitted transfer is still running when this function returns.
 *
 * dmaengine_synchronize() needs to be called before it is safe to free
 * any memory that is accessed by previously submitted descriptors or before
 * freeing any resources accessed from within the completion callback of any
 * previously submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
 *
 * If none of the two conditions above apply consider using
 * dmaengine_terminate_sync() instead.
 */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
        if (chan->device->device_terminate_all)
                return chan->device->device_terminate_all(chan);

        return -EINVAL;
}

/**
 * dmaengine_synchronize() - Synchronize DMA channel termination
 * @chan: The channel to synchronize
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 *
 * The behavior of this function is undefined if dma_async_issue_pending() has
 * been called between dmaengine_terminate_async() and this function.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline void dmaengine_synchronize(struct dma_chan *chan)
{
        might_sleep();

        if (chan->device->device_synchronize)
                chan->device->device_synchronize(chan);
}

/**
 * dmaengine_terminate_sync() - Terminate all active DMA transfers
 * @chan: The channel for which to terminate the transfers
 *
 * Calling this function will terminate all active and pending transfers
 * that have previously been submitted to the channel. It is similar to
 * dmaengine_terminate_async() but guarantees that the DMA transfer has
 * actually been stopped and that all complete callbacks have finished running
 * when the function returns.
 *
 * This function must only be called from non-atomic context and must not be
 * called from within a complete callback of a descriptor submitted on the
 * same channel.
 */
static inline int dmaengine_terminate_sync(struct dma_chan *chan)
{
        int ret;

        ret = dmaengine_terminate_async(chan);
        if (ret)
                return ret;

        dmaengine_synchronize(chan);

        return 0;
}
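
/*
 * Example (illustrative sketch, not part of the original header): tearing
 * down a channel.  'chan' is an assumption made up for the sketch.
 *
 *	// Process context, not from a completion callback:
 *	dmaengine_terminate_sync(chan);		// stop + wait for callbacks
 *	dma_release_channel(chan);
 *
 *	// From atomic context (or a callback), split the two steps:
 *	dmaengine_terminate_async(chan);
 *	// ...later, from process context, before freeing buffers:
 *	dmaengine_synchronize(chan);
 */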

static inline int dmaengine_pause(struct dma_chan *chan)
{
        if (chan->device->device_pause)
                return chan->device->device_pause(chan);

        return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
        if (chan->device->device_resume)
                return chan->device->device_resume(chan);

        return -ENOSYS;
}

static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        return chan->device->device_tx_status(chan, cookie, state);
}

static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
        return desc->tx_submit(desc);
}

static inline bool dmaengine_check_align(enum dmaengine_alignment align,
                                         size_t off1, size_t off2, size_t len)
{
        return !(((1 << align) - 1) & (off1 | off2 | len));
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
                                       size_t off2, size_t len)
{
        return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
                                      size_t off2, size_t len)
{
        return dmaengine_check_align(dev->xor_align, off1, off2, len);
}

static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
                                     size_t off2, size_t len)
{
        return dmaengine_check_align(dev->pq_align, off1, off2, len);
}

static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
                                       size_t off2, size_t len)
{
        return dmaengine_check_align(dev->fill_align, off1, off2, len);
}

static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
        dma->max_pq = maxpq;
        if (has_pq_continue)
                dma->max_pq |= DMA_HAS_PQ_CONTINUE;
}

static inline bool dmaf_continue(enum dma_ctrl_flags flags)
{
        return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}

static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
        enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;

        return (flags & mask) == mask;
}

static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
{
        return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}

static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
        return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}

/* dma_maxpq - reduce maxpq in the face of continued operations
 * @dma - dma device with PQ capability
 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
 *
 * When an engine does not support native continuation we need 3 extra
 * source slots to reuse P and Q with the following coefficients:
 * 1/ {00} * P : remove P from Q', but use it as a source for P'
 * 2/ {01} * Q : use Q to continue Q' calculation
 * 3/ {00} * Q : subtract Q from P' to cancel (2)
 *
 * In the case where P is disabled we only need 1 extra source:
 * 1/ {01} * Q : use Q to continue Q' calculation
 */
static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
        if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
                return dma_dev_to_maxpq(dma);
        if (dmaf_p_disabled_continue(flags))
                return dma_dev_to_maxpq(dma) - 1;
        if (dmaf_continue(flags))
                return dma_dev_to_maxpq(dma) - 3;
        BUG();
}

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
                                       size_t dir_icg)
{
        if (inc) {
                if (dir_icg)
                        return dir_icg;
                if (sgl)
                        return icg;
        }

        return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
                                           struct data_chunk *chunk)
{
        return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
                                 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
                                           struct data_chunk *chunk)
{
        return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
                                 chunk->icg, chunk->src_icg);
}

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
        return NULL;
}
#endif
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
        tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
        tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
        return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
        set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
        clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
        bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
        return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
        for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
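
/*
 * Example (illustrative sketch, not part of the original header): requesting
 * a memcpy-capable channel by capability mask.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */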

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
        chan->device->device_issue_pending(chan);
}

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
        dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = chan->device->device_tx_status(chan, cookie, &state);
        if (last)
                *last = state.last;
        if (used)
                *used = state.used;
        return status;
}
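
/*
 * Example (illustrative sketch, not part of the original header): polling a
 * previously submitted transaction and reading the residue.  'chan' and
 * 'cookie' are assumptions made up for the sketch.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	switch (status) {
 *	case DMA_COMPLETE:
 *		break;				// all data transferred
 *	case DMA_IN_PROGRESS:
 *	case DMA_PAUSED:
 *		pr_debug("%u bytes left\n", state.residue);
 *		break;				// residue validity depends on
 *						// caps.residue_granularity
 *	default:
 *		return -EIO;			// DMA_ERROR
 *	}
 */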

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated for lightweight testing of multiple cookies.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
        dma_cookie_t last_complete, dma_cookie_t last_used)
{
        if (last_complete <= last_used) {
                if ((cookie <= last_complete) || (cookie > last_used))
                        return DMA_COMPLETE;
        } else {
                if ((cookie <= last_complete) && (cookie > last_used))
                        return DMA_COMPLETE;
        }
        return DMA_IN_PROGRESS;
}

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
        if (!st)
                return;

        st->last = last;
        st->used = used;
        st->residue = residue;
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param,
                                       struct device_node *np);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                                     dma_filter_fn fn,
                                                     void *fn_param,
                                                     struct device_node *np)
{
        return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
                                                const char *name)
{
        return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
                                        const dma_cap_mask_t *mask)
{
        return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
                                     struct dma_slave_caps *caps)
{
        return -ENXIO;
}
#endif

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
        struct dma_slave_caps caps;
        int ret;

        ret = dma_get_slave_caps(tx->chan, &caps);
        if (ret)
                return ret;

        if (!caps.descriptor_reuse)
                return -EPERM;

        tx->flags |= DMA_CTRL_REUSE;
        return 0;
}
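
/*
 * Example (illustrative sketch, not part of the original header): reusing one
 * descriptor for repeated, identical transfers on a channel whose
 * capabilities report descriptor_reuse.  'chan', 'dma_buf', 'len', 'cookie'
 * and the fallback label are assumptions made up for the sketch; whether
 * resubmission from the completion callback is allowed depends on the driver.
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc || dmaengine_desc_set_reuse(desc))
 *		goto fallback;			// prepare a fresh descriptor
 *						// for every transfer instead
 *	// After each completion the same descriptor may be submitted again:
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	// When done, release it explicitly:
 *	dmaengine_desc_free(desc);
 */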

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
        tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
        return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
        /* this is supported for reusable descriptors, so check that */
        if (!dmaengine_desc_test_reuse(desc))
                return -EPERM;

        return desc->desc_free(desc);
}

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
                                      struct dma_chan *chan);
void dma_async_device_channel_unregister(struct dma_device *device,
                                         struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
#define dma_request_channel(mask, x, y) \
        __dma_request_channel(&(mask), x, y, NULL)

/* Deprecated, please use dma_request_chan() directly */
static inline struct dma_chan * __deprecated
dma_request_slave_channel(struct device *dev, const char *name)
{
        struct dma_chan *ch = dma_request_chan(dev, name);

        return IS_ERR(ch) ? NULL : ch;
}

static inline struct dma_chan
*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
                                  dma_filter_fn fn, void *fn_param,
                                  struct device *dev, const char *name)
{
        struct dma_chan *chan;

        chan = dma_request_slave_channel(dev, name);
        if (chan)
                return chan;

        if (!fn || !fn_param)
                return NULL;

        return __dma_request_channel(&mask, fn, fn_param, NULL);
}

static inline char *
dmaengine_get_direction_text(enum dma_transfer_direction dir)
{
        switch (dir) {
        case DMA_DEV_TO_MEM:
                return "DEV_TO_MEM";
        case DMA_MEM_TO_DEV:
                return "MEM_TO_DEV";
        case DMA_MEM_TO_MEM:
                return "MEM_TO_MEM";
        case DMA_DEV_TO_DEV:
                return "DEV_TO_DEV";
        default:
                return "invalid";
        }
}

static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
{
        if (chan->dev->chan_dma_dev)
                return &chan->dev->device;

        return chan->device->dev;
}

#endif /* LINUX_DMAENGINE_H */