/*
 * Isochronous I/O functionality:
 *   - Isochronous DMA context management
 *   - Isochronous bus resource management (channels, bandwidth), client side
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/byteorder.h>

#include "core.h"

/*
 * Isochronous DMA context management
 */

int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
	int i;

	buffer->page_count = 0;
	buffer->page_count_mapped = 0;
	buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
				      GFP_KERNEL);
	if (buffer->pages == NULL)
		return -ENOMEM;

	for (i = 0; i < page_count; i++) {
		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		if (buffer->pages[i] == NULL)
			break;
	}
	/* Record how many pages were actually allocated, so that a
	 * partially filled page array can be torn down again. */
	buffer->page_count = i;
	if (i < page_count) {
		fw_iso_buffer_destroy(buffer, NULL);
		return -ENOMEM;
	}

	return 0;
}

int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
			  enum dma_data_direction direction)
{
	dma_addr_t address;
	int i;

	buffer->direction = direction;

	for (i = 0; i < buffer->page_count; i++) {
		address = dma_map_page(card->device, buffer->pages[i],
				       0, PAGE_SIZE, direction);
		if (dma_mapping_error(card->device, address))
			break;

		/* Stash the bus address in the page's private field so
		 * that it can be retrieved again at unmap/lookup time. */
		set_page_private(buffer->pages[i], address);
	}
	buffer->page_count_mapped = i;
	if (i < buffer->page_count)
		return -ENOMEM;

	return 0;
}

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction)
{
	int ret;

	ret = fw_iso_buffer_alloc(buffer, page_count);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map_dma(buffer, card, direction);
	if (ret < 0)
		fw_iso_buffer_destroy(buffer, card);

	return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
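
/*
 * Example (an illustrative sketch, not part of this file; the page count
 * and DMA direction are made up for the example): a caller pairs
 * fw_iso_buffer_init() with fw_iso_buffer_destroy():
 *
 *	struct fw_iso_buffer buffer;
 *	int err;
 *
 *	err = fw_iso_buffer_init(&buffer, card, 16, DMA_FROM_DEVICE);
 *	if (err < 0)
 *		return err;
 *	...use the buffer with an isochronous context...
 *	fw_iso_buffer_destroy(&buffer, card);
 *
 * Note that fw_iso_buffer_init() cleans up after itself on failure, so
 * the error path must not call fw_iso_buffer_destroy() again.
 */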

void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
			   struct fw_card *card)
{
	int i;
	dma_addr_t address;

	/* card may be NULL if no pages were mapped (page_count_mapped == 0). */
	for (i = 0; i < buffer->page_count_mapped; i++) {
		address = page_private(buffer->pages[i]);
		dma_unmap_page(card->device, address,
			       PAGE_SIZE, buffer->direction);
	}
	for (i = 0; i < buffer->page_count; i++)
		__free_page(buffer->pages[i]);

	kfree(buffer->pages);
	buffer->pages = NULL;
	buffer->page_count = 0;
	buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);

/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
	size_t i;
	dma_addr_t address;
	ssize_t offset;

	for (i = 0; i < buffer->page_count; i++) {
		address = page_private(buffer->pages[i]);
		offset = (ssize_t)completed - (ssize_t)address;
		if (offset > 0 && offset <= PAGE_SIZE)
			return (i << PAGE_SHIFT) + offset;
	}

	return 0;
}

struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
		int type, int channel, int speed, size_t header_size,
		fw_iso_callback_t callback, void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = card->driver->allocate_iso_context(card,
						 type, channel, header_size);
	if (IS_ERR(ctx))
		return ctx;

	ctx->card = card;
	ctx->type = type;
	ctx->channel = channel;
	ctx->speed = speed;
	ctx->header_size = header_size;
	ctx->callback.sc = callback;
	ctx->callback_data = callback_data;

	return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
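
/*
 * Example (an illustrative sketch; rx_callback and my_data are placeholder
 * names, the callback signature follows fw_iso_callback_t from
 * <linux/firewire.h>):
 *
 *	static void rx_callback(struct fw_iso_context *ctx, u32 cycle,
 *				size_t header_length, void *header, void *data)
 *	{
 *		...handle completed packets, re-queue buffer chunks...
 *	}
 *
 *	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE, channel,
 *				    SCODE_400, 4, rx_callback, my_data);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 */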

void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
	ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);

int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags)
{
	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);

int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return ctx->card->driver->set_iso_channels(ctx, channels);
}

int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload)
{
	return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);

void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
	ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);

int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
	return ctx->card->driver->flush_iso_completions(ctx);
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);

int fw_iso_context_stop(struct fw_iso_context *ctx)
{
	return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
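
/*
 * The usual life cycle of an isochronous context, sketched for orientation
 * (each step maps onto one of the wrappers above):
 *
 *	fw_iso_context_create()        allocate a context and set callback
 *	fw_iso_context_queue()         repeatedly, to build the DMA program
 *	fw_iso_context_queue_flush()   make queued packets visible to the HW
 *	fw_iso_context_start()         start at a given cycle / sync / tags
 *	...the callback runs as packets complete; re-queue from there...
 *	fw_iso_context_stop()
 *	fw_iso_context_destroy()
 */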

/*
 * Isochronous bus resource management (channels, bandwidth), client side
 */

static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
			    int bandwidth, bool allocate)
{
	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
	__be32 data[2];

	/*
	 * On a 1394a IRM with low contention, try < 1 is enough.
	 * On a 1394-1995 IRM, we need at least try < 2.
	 * Let's just do try < 5.
	 */
	for (try = 0; try < 5; try++) {
		new = allocate ? old - bandwidth : old + bandwidth;
		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
			return -EBUSY;

		data[0] = cpu_to_be32(old);
		data[1] = cpu_to_be32(new);
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
				data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all bandwidth. */
			return allocate ? -EAGAIN : bandwidth;

		case RCODE_COMPLETE:
			if (be32_to_cpup(data) == old)
				return bandwidth;

			old = be32_to_cpup(data);
			/* Contention: retry with the value the IRM returned. */
		}
	}

	return -EIO;
}

static int manage_channel(struct fw_card *card, int irm_id, int generation,
			  u32 channels_mask, u64 offset, bool allocate)
{
	__be32 bit, all, old;
	__be32 data[2];
	int channel, ret = -EIO, retry = 5;

	old = all = allocate ? cpu_to_be32(~0) : 0;

	for (channel = 0; channel < 32; channel++) {
		if (!(channels_mask & 1 << channel))
			continue;

		ret = -EBUSY;

		bit = cpu_to_be32(1 << (31 - channel));
		if ((old & bit) != (all & bit))
			continue;

		data[0] = old;
		data[1] = old ^ bit;
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
					   irm_id, generation, SCODE_100,
					   offset, data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all channels. */
			return allocate ? -EAGAIN : channel;

		case RCODE_COMPLETE:
			if (data[0] == old)
				return channel;

			old = data[0];

			/* Is the IRM 1394a-2000 compliant? */
			if ((data[0] & bit) == (data[1] & bit))
				continue;

			fallthrough;	/* It's a 1394-1995 IRM, retry. */
		default:
			if (retry) {
				retry--;
				channel--;
			} else {
				ret = -EIO;
			}
		}
	}

	return ret;
}

static void deallocate_channel(struct fw_card *card, int irm_id,
			       int generation, int channel)
{
	u32 mask;
	u64 offset;

	mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;

	manage_channel(card, irm_id, generation, mask, offset, false);
}

/**
 * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
 * @card: card interface for this action
 * @generation: bus generation
 * @channels_mask: bitfield of candidate channels
 * @channel: pointer for returning channel allocation result
 * @bandwidth: pointer for returning bandwidth allocation result
 * @allocate: whether to allocate (true) or deallocate (false)
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 *
 * This function blocks (sleeps) during communication with the IRM.
 *
 * Allocates or deallocates at most one channel out of channels_mask.
 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
 * channel 0 and LSB for channel 63.)
 * Allocates or deallocates as many bandwidth allocation units as specified.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel allocation fails, no bandwidth will be allocated either.
 * If bandwidth allocation fails, no channel will be allocated either.
 * But deallocations of channel and bandwidth are tried independently
 * of each other's success.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
			    u64 channels_mask, int *channel, int *bandwidth,
			    bool allocate)
{
	u32 channels_hi = channels_mask;	/* channels 31...0 */
	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
	int irm_id, ret, c = -EINVAL;

	spin_lock_irq(&card->lock);
	irm_id = card->irm_node->node_id;
	spin_unlock_irq(&card->lock);

	if (channels_hi)
		c = manage_channel(card, irm_id, generation, channels_hi,
				   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
				   allocate);
	if (channels_lo && c < 0) {
		c = manage_channel(card, irm_id, generation, channels_lo,
				   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
				   allocate);
		if (c >= 0)
			c += 32;
	}
	*channel = c;

	if (allocate && channels_mask != 0 && c < 0)
		*bandwidth = 0;

	if (*bandwidth == 0)
		return;

	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
	if (ret < 0)
		*bandwidth = 0;

	if (allocate && ret < 0) {
		if (c >= 0)
			deallocate_channel(card, irm_id, generation, c);
		*channel = ret;
	}
}
EXPORT_SYMBOL(fw_iso_resource_manage);
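
/*
 * Example (an illustrative sketch): try to allocate one channel out of
 * 0...31 together with some bandwidth, and release both again later.
 * The bandwidth value is a made-up number of allocation units:
 *
 *	int channel, bandwidth = 2400;
 *
 *	fw_iso_resource_manage(card, generation, 0xffffffffULL,
 *			       &channel, &bandwidth, true);
 *	if (channel < 0 || bandwidth == 0)
 *		...allocation failed, inspect *channel and *bandwidth...
 *
 *	...
 *
 *	bandwidth = 2400;
 *	fw_iso_resource_manage(card, generation, 1ULL << channel,
 *			       &channel, &bandwidth, false);
 */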