0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #ifndef _ASM_DMA_H
0014 #define _ASM_DMA_H
0015
0016 #include <asm/io.h> /* need byte IO */
0017 #include <linux/spinlock.h> /* And spinlocks */
0018 #include <linux/delay.h>
0019
0020
/*
 * All 8237 register accesses funnel through dma_outb()/dma_inb() so the
 * pacing variant can be selected in one place.  With
 * HAVE_REALLY_SLOW_DMA_CONTROLLER defined, the outb_p ("pause") form is
 * used, which inserts a short recovery delay after each write for
 * controllers that need settling time.
 */
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb	outb_p
#else
#define dma_outb	outb
#endif

#define dma_inb		inb
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
/*
 * Number of 8237-style ISA DMA channels, unless the platform opts out
 * of the generic ISA DMA support and provides its own definition.
 */
#ifndef CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN
#define MAX_DMA_CHANNELS	8
#endif

/*
 * Highest (virtual) address to which an ISA-style DMA transfer can be
 * performed: the first 16 MB of the kernel direct map by default.
 * NOTE(review): SGI IP22/IP28 use PAGE_OFFSET itself — presumably
 * because these platforms have no ISA DMA window; confirm against the
 * platform code.
 */
#if defined(CONFIG_SGI_IP22) || defined(CONFIG_SGI_IP28)

#define MAX_DMA_ADDRESS		PAGE_OFFSET
#else
#define MAX_DMA_ADDRESS		(PAGE_OFFSET + 0x01000000)
#endif
/* Page frame number corresponding to MAX_DMA_ADDRESS. */
#define MAX_DMA_PFN	PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))

/* First PFN above the 32-bit addressable range, unless overridden. */
#ifndef MAX_DMA32_PFN
#define MAX_DMA32_PFN	(1UL << (32 - PAGE_SHIFT))
#endif
0098
0099
/* 8237 DMA controllers: I/O port bases */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09	/* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D	/* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2	/* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA	/* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */

/* Per-channel base address registers (16-bit, written low/high byte) */
#define DMA_ADDR_0	0x00	/* DMA address registers */
#define DMA_ADDR_1	0x02
#define DMA_ADDR_2	0x04
#define DMA_ADDR_3	0x06
#define DMA_ADDR_4	0xC0
#define DMA_ADDR_5	0xC4
#define DMA_ADDR_6	0xC8
#define DMA_ADDR_7	0xCC

/* Per-channel transfer count registers (16-bit, written low/high byte) */
#define DMA_CNT_0	0x01	/* DMA count registers */
#define DMA_CNT_1	0x03
#define DMA_CNT_2	0x05
#define DMA_CNT_3	0x07
#define DMA_CNT_4	0xC2
#define DMA_CNT_5	0xC6
#define DMA_CNT_6	0xCA
#define DMA_CNT_7	0xCE

/* External page registers supplying address bits 16..23.
 * Note: channel 4 (the cascade channel) has no page register. */
#define DMA_PAGE_0	0x87	/* DMA page registers */
#define DMA_PAGE_1	0x83
#define DMA_PAGE_2	0x81
#define DMA_PAGE_3	0x82
#define DMA_PAGE_5	0x8B
#define DMA_PAGE_6	0x89
#define DMA_PAGE_7	0x8A

/* Mode register values (OR with the channel number; see set_dma_mode()) */
#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */

#define DMA_AUTOINIT	0x10	/* OR into mode to re-arm after terminal count */
0157
/* Serializes access to the shared 8237 controller registers; taken via
 * claim_dma_lock()/release_dma_lock() below. */
extern spinlock_t dma_spin_lock;
0159
0160 static __inline__ unsigned long claim_dma_lock(void)
0161 {
0162 unsigned long flags;
0163 spin_lock_irqsave(&dma_spin_lock, flags);
0164 return flags;
0165 }
0166
0167 static __inline__ void release_dma_lock(unsigned long flags)
0168 {
0169 spin_unlock_irqrestore(&dma_spin_lock, flags);
0170 }
0171
0172
0173 static __inline__ void enable_dma(unsigned int dmanr)
0174 {
0175 if (dmanr<=3)
0176 dma_outb(dmanr, DMA1_MASK_REG);
0177 else
0178 dma_outb(dmanr & 3, DMA2_MASK_REG);
0179 }
0180
0181 static __inline__ void disable_dma(unsigned int dmanr)
0182 {
0183 if (dmanr<=3)
0184 dma_outb(dmanr | 4, DMA1_MASK_REG);
0185 else
0186 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
0187 }
0188
0189
0190
0191
0192
0193
0194
0195
0196 static __inline__ void clear_dma_ff(unsigned int dmanr)
0197 {
0198 if (dmanr<=3)
0199 dma_outb(0, DMA1_CLEAR_FF_REG);
0200 else
0201 dma_outb(0, DMA2_CLEAR_FF_REG);
0202 }
0203
0204
0205 static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
0206 {
0207 if (dmanr<=3)
0208 dma_outb(mode | dmanr, DMA1_MODE_REG);
0209 else
0210 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
0211 }
0212
0213
0214
0215
0216
0217
0218 static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
0219 {
0220 switch(dmanr) {
0221 case 0:
0222 dma_outb(pagenr, DMA_PAGE_0);
0223 break;
0224 case 1:
0225 dma_outb(pagenr, DMA_PAGE_1);
0226 break;
0227 case 2:
0228 dma_outb(pagenr, DMA_PAGE_2);
0229 break;
0230 case 3:
0231 dma_outb(pagenr, DMA_PAGE_3);
0232 break;
0233 case 5:
0234 dma_outb(pagenr & 0xfe, DMA_PAGE_5);
0235 break;
0236 case 6:
0237 dma_outb(pagenr & 0xfe, DMA_PAGE_6);
0238 break;
0239 case 7:
0240 dma_outb(pagenr & 0xfe, DMA_PAGE_7);
0241 break;
0242 }
0243 }
0244
0245
0246
0247
0248
/*
 * Program a channel's 16-bit base address register plus its page
 * register.  'a' is the bus/physical address of the buffer.
 *
 * NOTE(review): the caller must have issued clear_dma_ff() first —
 * both bytes are written to the same port and the controller's byte
 * flip-flop decides which half each write lands in.
 *
 * Channels 4..7 transfer 16-bit words, hence the address is written
 * shifted down by one (a word address); bit 0 of the page value is
 * likewise masked off in set_dma_page().
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	set_dma_page(dmanr, a>>16);	/* bits 16..23 via the page register */
	if (dmanr <= 3) {
		dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
		dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
	} else {
		dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
		dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
	}
}
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
/*
 * Program a channel's transfer count.  The 8237 performs count+1
 * transfers, so the hardware is given count-1; passing count == 0
 * therefore wraps (unsigned) and requests the maximum-length transfer.
 * For the 16-bit channels 4..7 the value written is a word count
 * (count >> 1), i.e. 'count' is still expressed in bytes.
 *
 * NOTE(review): assumes clear_dma_ff() was called beforehand — both
 * bytes go to the same count port via the byte flip-flop.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3) {
		dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
		dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
	} else {
		dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
		dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
	}
}
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
/*
 * Read back the number of bytes left to transfer on a channel.
 * The count register holds "remaining - 1", hence the leading "1 +";
 * the unsigned short lets a fully-completed transfer wrap to 0.
 * Two reads of the same port yield low then high byte (byte
 * flip-flop), and for the 16-bit channels 4..7 the word count is
 * doubled back into bytes.
 *
 * NOTE(review): assumes clear_dma_ff() was called first, and the
 * value is only reliable while the channel is masked or finished —
 * a live counter can change between the two dma_inb()s.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
		: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;

	/* unsigned short gives the intended 16-bit wraparound */
	unsigned short count;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr<=3)? count : (count<<1);
}
0304
0305
0306
/* Reserve a DMA channel for 'device_id'; nonzero return means the
 * channel is unavailable.  NOTE(review): implemented elsewhere —
 * presumably the generic kernel/dma.c; confirm. */
extern int request_dma(unsigned int dmanr, const char * device_id);
/* Release a channel previously obtained with request_dma(). */
extern void free_dma(unsigned int dmanr);
0309
0310 #endif