0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
0004  * with Common Isochronous Packet (IEC 61883-1) headers
0005  *
0006  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
0007  */
0008 
0009 #include <linux/device.h>
0010 #include <linux/err.h>
0011 #include <linux/firewire.h>
0012 #include <linux/firewire-constants.h>
0013 #include <linux/module.h>
0014 #include <linux/slab.h>
0015 #include <sound/pcm.h>
0016 #include <sound/pcm_params.h>
0017 #include "amdtp-stream.h"
0018 
0019 #define TICKS_PER_CYCLE     3072
0020 #define CYCLES_PER_SECOND   8000
0021 #define TICKS_PER_SECOND    (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
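// Derived from the constants above, for reference: one isochronous cycle lasts
// 1 s / 8000 = 125 usec, and TICKS_PER_SECOND works out to 3072 * 8000 = 24,576,000.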
0022 
0023 #define OHCI_SECOND_MODULUS     8
0024 
0025 /* Always support Linux tracing subsystem. */
0026 #define CREATE_TRACE_POINTS
0027 #include "amdtp-stream-trace.h"
0028 
0029 #define TRANSFER_DELAY_TICKS    0x2e00 /* 479.17 microseconds */
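// For reference: 0x2e00 is 11776 ticks, i.e. 11776 / 24576000 s ~= 479.17 usec,
// a little less than four isochronous cycles (4 * 125 usec = 500 usec).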
0030 
0031 /* isochronous header parameters */
0032 #define ISO_DATA_LENGTH_SHIFT   16
0033 #define TAG_NO_CIP_HEADER   0
0034 #define TAG_CIP         1
0035 
0036 // Common Isochronous Packet (CIP) header parameters. Use a two-quadlet CIP header when supported.
0037 #define CIP_HEADER_QUADLETS 2
0038 #define CIP_EOH_SHIFT       31
0039 #define CIP_EOH         (1u << CIP_EOH_SHIFT)
0040 #define CIP_EOH_MASK        0x80000000
0041 #define CIP_SID_SHIFT       24
0042 #define CIP_SID_MASK        0x3f000000
0043 #define CIP_DBS_MASK        0x00ff0000
0044 #define CIP_DBS_SHIFT       16
0045 #define CIP_SPH_MASK        0x00000400
0046 #define CIP_SPH_SHIFT       10
0047 #define CIP_DBC_MASK        0x000000ff
0048 #define CIP_FMT_SHIFT       24
0049 #define CIP_FMT_MASK        0x3f000000
0050 #define CIP_FDF_MASK        0x00ff0000
0051 #define CIP_FDF_SHIFT       16
0052 #define CIP_FDF_NO_DATA     0xff
0053 #define CIP_SYT_MASK        0x0000ffff
0054 #define CIP_SYT_NO_INFO     0xffff
0055 #define CIP_SYT_CYCLE_MODULUS   16
0056 #define CIP_NO_DATA     ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
0057 
0058 #define CIP_HEADER_SIZE     (sizeof(__be32) * CIP_HEADER_QUADLETS)
0059 
0060 /* Audio and Music transfer protocol specific parameters */
0061 #define CIP_FMT_AM      0x10
0062 #define AMDTP_FDF_NO_DATA   0xff
0063 
0064 // For iso header and tstamp.
0065 #define IR_CTX_HEADER_DEFAULT_QUADLETS  2
0066 // Add nothing.
0067 #define IR_CTX_HEADER_SIZE_NO_CIP   (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
0068 // Add the two-quadlet CIP header.
0069 #define IR_CTX_HEADER_SIZE_CIP      (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
0070 #define HEADER_TSTAMP_MASK  0x0000ffff
0071 
0072 #define IT_PKT_HEADER_SIZE_CIP      CIP_HEADER_SIZE
0073 #define IT_PKT_HEADER_SIZE_NO_CIP   0 // Nothing.
0074 
0075 // The initial firmware of OXFW970 can postpone transmission of packets while it finishes an
0076 // asynchronous transaction. This module tolerates up to 5 skipped cycles to avoid buffer
0077 // overrun. If the actual device skips more, this module stops the packet streaming.
0078 #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES    5
0079 
0080 /**
0081  * amdtp_stream_init - initialize an AMDTP stream structure
0082  * @s: the AMDTP stream to initialize
0083  * @unit: the target of the stream
0084  * @dir: the direction of stream
0085  * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants.
0086  * @fmt: the value of fmt field in CIP header
0087  * @process_ctx_payloads: callback handler to process payloads of isoc context
0088  * @protocol_size: the size of the protocol-specific data to be newly allocated
0089  */
0090 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
0091               enum amdtp_stream_direction dir, unsigned int flags,
0092               unsigned int fmt,
0093               amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
0094               unsigned int protocol_size)
0095 {
0096     if (process_ctx_payloads == NULL)
0097         return -EINVAL;
0098 
0099     s->protocol = kzalloc(protocol_size, GFP_KERNEL);
0100     if (!s->protocol)
0101         return -ENOMEM;
0102 
0103     s->unit = unit;
0104     s->direction = dir;
0105     s->flags = flags;
0106     s->context = ERR_PTR(-1);
0107     mutex_init(&s->mutex);
0108     s->packet_index = 0;
0109 
0110     init_waitqueue_head(&s->ready_wait);
0111 
0112     s->fmt = fmt;
0113     s->process_ctx_payloads = process_ctx_payloads;
0114 
0115     return 0;
0116 }
0117 EXPORT_SYMBOL(amdtp_stream_init);
0118 
0119 /**
0120  * amdtp_stream_destroy - free stream resources
0121  * @s: the AMDTP stream to destroy
0122  */
0123 void amdtp_stream_destroy(struct amdtp_stream *s)
0124 {
0125     /* Not initialized. */
0126     if (s->protocol == NULL)
0127         return;
0128 
0129     WARN_ON(amdtp_stream_running(s));
0130     kfree(s->protocol);
0131     mutex_destroy(&s->mutex);
0132 }
0133 EXPORT_SYMBOL(amdtp_stream_destroy);
0134 
0135 const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
0136     [CIP_SFC_32000]  =  8,
0137     [CIP_SFC_44100]  =  8,
0138     [CIP_SFC_48000]  =  8,
0139     [CIP_SFC_88200]  = 16,
0140     [CIP_SFC_96000]  = 16,
0141     [CIP_SFC_176400] = 32,
0142     [CIP_SFC_192000] = 32,
0143 };
0144 EXPORT_SYMBOL(amdtp_syt_intervals);
0145 
0146 const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
0147     [CIP_SFC_32000]  =  32000,
0148     [CIP_SFC_44100]  =  44100,
0149     [CIP_SFC_48000]  =  48000,
0150     [CIP_SFC_88200]  =  88200,
0151     [CIP_SFC_96000]  =  96000,
0152     [CIP_SFC_176400] = 176400,
0153     [CIP_SFC_192000] = 192000,
0154 };
0155 EXPORT_SYMBOL(amdtp_rate_table);
0156 
0157 static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
0158                     struct snd_pcm_hw_rule *rule)
0159 {
0160     struct snd_interval *s = hw_param_interval(params, rule->var);
0161     const struct snd_interval *r =
0162         hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
0163     struct snd_interval t = {0};
0164     unsigned int step = 0;
0165     int i;
0166 
0167     for (i = 0; i < CIP_SFC_COUNT; ++i) {
0168         if (snd_interval_test(r, amdtp_rate_table[i]))
0169             step = max(step, amdtp_syt_intervals[i]);
0170     }
0171 
0172     t.min = roundup(s->min, step);
0173     t.max = rounddown(s->max, step);
0174     t.integer = 1;
0175 
0176     return snd_interval_refine(s, &t);
0177 }
0178 
0179 /**
0180  * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
0181  * @s:      the AMDTP stream, which must be initialized.
0182  * @runtime:    the PCM substream runtime
0183  */
0184 int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
0185                     struct snd_pcm_runtime *runtime)
0186 {
0187     struct snd_pcm_hardware *hw = &runtime->hw;
0188     unsigned int ctx_header_size;
0189     unsigned int maximum_usec_per_period;
0190     int err;
0191 
0192     hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
0193            SNDRV_PCM_INFO_INTERLEAVED |
0194            SNDRV_PCM_INFO_JOINT_DUPLEX |
0195            SNDRV_PCM_INFO_MMAP |
0196            SNDRV_PCM_INFO_MMAP_VALID |
0197            SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
0198 
0199     hw->periods_min = 2;
0200     hw->periods_max = UINT_MAX;
0201 
0202     /* bytes for a frame */
0203     hw->period_bytes_min = 4 * hw->channels_max;
0204 
0205     /* Just to prevent allocating too many pages. */
0206     hw->period_bytes_max = hw->period_bytes_min * 2048;
0207     hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
0208 
0209     // The Linux driver for the 1394 OHCI controller voluntarily flushes an isoc
0210     // context when the total size of accumulated context headers reaches
0211     // PAGE_SIZE. This kicks the work for the isoc context and brings a
0212     // callback in the middle of the scheduled interrupts.
0213     // Although AMDTP streams in the same domain use the same events per
0214     // IRQ, use the largest context header size between IT/IR contexts.
0215     // Here, the context header size of the IR context is used for both
0216     // contexts.
0217     if (!(s->flags & CIP_NO_HEADER))
0218         ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
0219     else
0220         ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
0221     maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
0222                   CYCLES_PER_SECOND / ctx_header_size;
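    // For illustration, assuming a 4 KiB PAGE_SIZE and the 16 byte CIP-style context
    // header: 1000000 * 4096 / 8000 / 16 = 32000, so in that case the period time is
    // capped at 32 msec.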
0223 
0224     // In IEC 61883-6, one isoc packet can transfer up to the syt interval's
0225     // worth of events. This comes from the interval of the isoc cycle. As the
0226     // 1394 OHCI controller can generate a hardware IRQ per isoc packet, the
0227     // interval is 125 usec.
0228     // However, there are two modes of transmission in IEC 61883-6; blocking
0229     // and non-blocking. In blocking mode, the sequence of isoc packets
0230     // includes 'empty' or 'NODATA' packets which carry no event. In
0231     // non-blocking mode, the number of events per packet is variable up to
0232     // the syt interval.
0233     // Due to the above protocol design, the minimum period per
0234     // interrupt should cover double the syt interval, thus it is
0235     // 250 usec.
0236     err = snd_pcm_hw_constraint_minmax(runtime,
0237                        SNDRV_PCM_HW_PARAM_PERIOD_TIME,
0238                        250, maximum_usec_per_period);
0239     if (err < 0)
0240         goto end;
0241 
0242     /* Non-Blocking stream has no more constraints */
0243     if (!(s->flags & CIP_BLOCKING))
0244         goto end;
0245 
0246     /*
0247      * One AMDTP packet can include several frames. In blocking mode, the
0248      * number equals SYT_INTERVAL, so it is 8, 16 or 32,
0249      * depending on the sampling rate. For accurate period interrupts, it's
0250      * preferable to align period/buffer sizes to the current SYT_INTERVAL.
0251      */
0252     err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
0253                   apply_constraint_to_size, NULL,
0254                   SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
0255                   SNDRV_PCM_HW_PARAM_RATE, -1);
0256     if (err < 0)
0257         goto end;
0258     err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
0259                   apply_constraint_to_size, NULL,
0260                   SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
0261                   SNDRV_PCM_HW_PARAM_RATE, -1);
0262     if (err < 0)
0263         goto end;
0264 end:
0265     return err;
0266 }
0267 EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
0268 
0269 /**
0270  * amdtp_stream_set_parameters - set stream parameters
0271  * @s: the AMDTP stream to configure
0272  * @rate: the sample rate
0273  * @data_block_quadlets: the size of a data block in quadlet unit
0274  *
0275  * The parameters must be set before the stream is started, and must not be
0276  * changed while the stream is running.
0277  */
0278 int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
0279                 unsigned int data_block_quadlets)
0280 {
0281     unsigned int sfc;
0282 
0283     for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
0284         if (amdtp_rate_table[sfc] == rate)
0285             break;
0286     }
0287     if (sfc == ARRAY_SIZE(amdtp_rate_table))
0288         return -EINVAL;
0289 
0290     s->sfc = sfc;
0291     s->data_block_quadlets = data_block_quadlets;
0292     s->syt_interval = amdtp_syt_intervals[sfc];
0293 
0294     // default buffering in the device.
0295     s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
0296 
0297     // additional buffering needed to adjust for no-data packets.
0298     if (s->flags & CIP_BLOCKING)
0299         s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
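    // For illustration, assuming 48000 Hz in blocking mode: 11776 - 3072 = 8704
    // ticks of base delay plus 24576000 * 8 / 48000 = 4096 ticks for one syt
    // interval, i.e. 12800 ticks (~520.8 usec) in total.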
0300 
0301     return 0;
0302 }
0303 EXPORT_SYMBOL(amdtp_stream_set_parameters);
0304 
0305 // The CIP header is processed in the context header, apart from the context payload.
0306 static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
0307 {
0308     unsigned int multiplier;
0309 
0310     if (s->flags & CIP_JUMBO_PAYLOAD)
0311         multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
0312     else
0313         multiplier = 1;
0314 
0315     return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
0316 }
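// For illustration, assuming a stereo AM824 stream at 48000 Hz without
// CIP_JUMBO_PAYLOAD (syt_interval = 8, data_block_quadlets = 2): the maximum
// context payload is 8 * 2 * 4 * 1 = 64 bytes, to which
// amdtp_stream_get_max_payload() below adds the 8 byte CIP header.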
0317 
0318 /**
0319  * amdtp_stream_get_max_payload - get the stream's packet size
0320  * @s: the AMDTP stream
0321  *
0322  * This function must not be called before the stream has been configured
0323  * with amdtp_stream_set_parameters().
0324  */
0325 unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
0326 {
0327     unsigned int cip_header_size;
0328 
0329     if (!(s->flags & CIP_NO_HEADER))
0330         cip_header_size = CIP_HEADER_SIZE;
0331     else
0332         cip_header_size = 0;
0333 
0334     return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
0335 }
0336 EXPORT_SYMBOL(amdtp_stream_get_max_payload);
0337 
0338 /**
0339  * amdtp_stream_pcm_prepare - prepare PCM device for running
0340  * @s: the AMDTP stream
0341  *
0342  * This function should be called from the PCM device's .prepare callback.
0343  */
0344 void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
0345 {
0346     s->pcm_buffer_pointer = 0;
0347     s->pcm_period_pointer = 0;
0348 }
0349 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
0350 
0351 static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
0352                       const unsigned int seq_size, unsigned int seq_tail,
0353                       unsigned int count)
0354 {
0355     const unsigned int syt_interval = s->syt_interval;
0356     int i;
0357 
0358     for (i = 0; i < count; ++i) {
0359         struct seq_desc *desc = descs + seq_tail;
0360 
0361         if (desc->syt_offset != CIP_SYT_NO_INFO)
0362             desc->data_blocks = syt_interval;
0363         else
0364             desc->data_blocks = 0;
0365 
0366         seq_tail = (seq_tail + 1) % seq_size;
0367     }
0368 }
0369 
0370 static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
0371                            const unsigned int seq_size, unsigned int seq_tail,
0372                            unsigned int count)
0373 {
0374     const enum cip_sfc sfc = s->sfc;
0375     unsigned int state = s->ctx_data.rx.data_block_state;
0376     int i;
0377 
0378     for (i = 0; i < count; ++i) {
0379         struct seq_desc *desc = descs + seq_tail;
0380 
0381         if (!cip_sfc_is_base_44100(sfc)) {
0382             // Sample_rate / 8000 is an integer, and precomputed.
0383             desc->data_blocks = state;
0384         } else {
0385             unsigned int phase = state;
0386 
0387         /*
0388          * This calculates the number of data blocks per packet so that
0389          * 1) the overall rate is correct and exactly synchronized to
0390          *    the bus clock, and
0391          * 2) packets with a rounded-up number of blocks occur as early
0392          *    as possible in the sequence (to prevent underruns of the
0393          *    device's buffer).
0394          */
0395             if (sfc == CIP_SFC_44100)
0396                 /* 6 6 5 6 5 6 5 ... */
0397                 desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
0398             else
0399                 /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
0400                 desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
0401             if (++phase >= (80 >> (sfc >> 1)))
0402                 phase = 0;
0403             state = phase;
0404         }
0405 
0406         seq_tail = (seq_tail + 1) % seq_size;
0407     }
0408 
0409     s->ctx_data.rx.data_block_state = state;
0410 }
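// For illustration: at 44.1 kHz one 80 packet period of the sequence above contains
// 41 packets of 6 data blocks and 39 packets of 5, i.e. 441 data blocks per 80
// cycles, exactly the 44100 / 8000 average described in the comment.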
0411 
0412 static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
0413             unsigned int *syt_offset_state, enum cip_sfc sfc)
0414 {
0415     unsigned int syt_offset;
0416 
0417     if (*last_syt_offset < TICKS_PER_CYCLE) {
0418         if (!cip_sfc_is_base_44100(sfc))
0419             syt_offset = *last_syt_offset + *syt_offset_state;
0420         else {
0421         /*
0422          * The time, in ticks, of the n'th SYT_INTERVAL sample is:
0423          *   n * SYT_INTERVAL * 24576000 / sample_rate
0424          * Modulo TICKS_PER_CYCLE, the difference between successive
0425          * elements is about 1386.23.  Rounding the results of this
0426          * formula to the SYT precision results in a sequence of
0427          * differences that begins with:
0428          *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
0429          * This code generates _exactly_ the same sequence.
0430          */
0431             unsigned int phase = *syt_offset_state;
0432             unsigned int index = phase % 13;
0433 
0434             syt_offset = *last_syt_offset;
0435             syt_offset += 1386 + ((index && !(index & 3)) ||
0436                           phase == 146);
0437             if (++phase >= 147)
0438                 phase = 0;
0439             *syt_offset_state = phase;
0440         }
0441     } else
0442         syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
0443     *last_syt_offset = syt_offset;
0444 
0445     if (syt_offset >= TICKS_PER_CYCLE)
0446         syt_offset = CIP_SYT_NO_INFO;
0447 
0448     return syt_offset;
0449 }
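// For illustration: over one 147 packet period, the 44.1 kHz based branch above adds
// the extra tick 34 times (three indices in each of the eleven full modulo-13 cycles,
// plus once at phase 146), so the accumulated offset advances by
// 147 * 1386 + 34 = 203776 ticks, which equals 147 * (8 * 24576000 / 44100 - 3072) exactly.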
0450 
0451 static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
0452                    const unsigned int seq_size, unsigned int seq_tail,
0453                    unsigned int count)
0454 {
0455     const enum cip_sfc sfc = s->sfc;
0456     unsigned int last = s->ctx_data.rx.last_syt_offset;
0457     unsigned int state = s->ctx_data.rx.syt_offset_state;
0458     int i;
0459 
0460     for (i = 0; i < count; ++i) {
0461         struct seq_desc *desc = descs + seq_tail;
0462 
0463         desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
0464 
0465         seq_tail = (seq_tail + 1) % seq_size;
0466     }
0467 
0468     s->ctx_data.rx.last_syt_offset = last;
0469     s->ctx_data.rx.syt_offset_state = state;
0470 }
0471 
0472 static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
0473                        unsigned int transfer_delay)
0474 {
0475     unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
0476     unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
0477     unsigned int syt_offset;
0478 
0479     // Round up.
0480     if (syt_cycle_lo < cycle_lo)
0481         syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
0482     syt_cycle_lo -= cycle_lo;
0483 
0484     // Subtract transfer delay so that the synchronization offset is not so large
0485     // at transmission.
0486     syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
0487     if (syt_offset < transfer_delay)
0488         syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
0489 
0490     return syt_offset - transfer_delay;
0491 }
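// For illustration, assuming a SYT value of 0x52e0 received in a cycle whose low four
// bits are 3: syt_cycle_lo is 5, so the event lies 2 cycles plus 0x2e0 = 736 ticks
// after the start of the receive cycle, i.e. 2 * 3072 + 736 = 6880 ticks, before the
// transfer delay is subtracted.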
0492 
0493 // Both the producer and the consumer of the queue run on the same clock of the IEEE 1394 bus.
0494 // Additionally, the sequence of tx packets is strictly checked for any discontinuity
0495 // before entries are filled into the queue. The calculation is therefore safe from overrun
0496 // even if it looks fragile.
0497 static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
0498 {
0499     const unsigned int cache_size = s->ctx_data.tx.cache.size;
0500     unsigned int cycles = s->ctx_data.tx.cache.tail;
0501 
0502     if (cycles < head)
0503         cycles += cache_size;
0504     cycles -= head;
0505 
0506     return cycles;
0507 }
0508 
0509 static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsigned int desc_count)
0510 {
0511     const unsigned int transfer_delay = s->transfer_delay;
0512     const unsigned int cache_size = s->ctx_data.tx.cache.size;
0513     struct seq_desc *cache = s->ctx_data.tx.cache.descs;
0514     unsigned int cache_tail = s->ctx_data.tx.cache.tail;
0515     bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
0516     int i;
0517 
0518     for (i = 0; i < desc_count; ++i) {
0519         struct seq_desc *dst = cache + cache_tail;
0520         const struct pkt_desc *src = descs + i;
0521 
0522         if (aware_syt && src->syt != CIP_SYT_NO_INFO)
0523             dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
0524         else
0525             dst->syt_offset = CIP_SYT_NO_INFO;
0526         dst->data_blocks = src->data_blocks;
0527 
0528         cache_tail = (cache_tail + 1) % cache_size;
0529     }
0530 
0531     s->ctx_data.tx.cache.tail = cache_tail;
0532 }
0533 
0534 static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
0535 {
0536     struct seq_desc *descs = s->ctx_data.rx.seq.descs;
0537     unsigned int seq_tail = s->ctx_data.rx.seq.tail;
0538     const unsigned int seq_size = s->ctx_data.rx.seq.size;
0539 
0540     pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);
0541 
0542     if (s->flags & CIP_BLOCKING)
0543         pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
0544     else
0545         pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);
0546 
0547     s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
0548 }
0549 
0550 static void pool_replayed_seq(struct amdtp_stream *s, unsigned int count)
0551 {
0552     struct amdtp_stream *target = s->ctx_data.rx.replay_target;
0553     const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
0554     const unsigned int cache_size = target->ctx_data.tx.cache.size;
0555     unsigned int cache_head = s->ctx_data.rx.cache_head;
0556     struct seq_desc *descs = s->ctx_data.rx.seq.descs;
0557     const unsigned int seq_size = s->ctx_data.rx.seq.size;
0558     unsigned int seq_tail = s->ctx_data.rx.seq.tail;
0559     int i;
0560 
0561     for (i = 0; i < count; ++i) {
0562         descs[seq_tail] = cache[cache_head];
0563         seq_tail = (seq_tail + 1) % seq_size;
0564         cache_head = (cache_head + 1) % cache_size;
0565     }
0566 
0567     s->ctx_data.rx.seq.tail = seq_tail;
0568     s->ctx_data.rx.cache_head = cache_head;
0569 }
0570 
0571 static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
0572 {
0573     struct amdtp_domain *d = s->domain;
0574 
0575     if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
0576         pool_ideal_seq_descs(s, count);
0577     } else {
0578         if (!d->replay.on_the_fly) {
0579             pool_replayed_seq(s, count);
0580         } else {
0581             struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
0582             const unsigned int cache_size = tx->ctx_data.tx.cache.size;
0583             const unsigned int cache_head = s->ctx_data.rx.cache_head;
0584             unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_head);
0585 
0586             if (cached_cycles > count && cached_cycles > cache_size / 2)
0587                 pool_replayed_seq(s, count);
0588             else
0589                 pool_ideal_seq_descs(s, count);
0590         }
0591     }
0592 }
0593 
0594 static void update_pcm_pointers(struct amdtp_stream *s,
0595                 struct snd_pcm_substream *pcm,
0596                 unsigned int frames)
0597 {
0598     unsigned int ptr;
0599 
0600     ptr = s->pcm_buffer_pointer + frames;
0601     if (ptr >= pcm->runtime->buffer_size)
0602         ptr -= pcm->runtime->buffer_size;
0603     WRITE_ONCE(s->pcm_buffer_pointer, ptr);
0604 
0605     s->pcm_period_pointer += frames;
0606     if (s->pcm_period_pointer >= pcm->runtime->period_size) {
0607         s->pcm_period_pointer -= pcm->runtime->period_size;
0608 
0609         // The program in the user process should periodically check the status of the
0610         // intermediate buffer associated with the PCM substream to process PCM frames in
0611         // the buffer, instead of receiving a period-elapsed notification via poll wait.
0612         if (!pcm->runtime->no_period_wakeup) {
0613             if (in_softirq()) {
0614                 // In software IRQ context for 1394 OHCI.
0615                 snd_pcm_period_elapsed(pcm);
0616             } else {
0617                 // In the process context of the ALSA PCM application, with the lock of
0618                 // the PCM substream held.
0619                 snd_pcm_period_elapsed_under_stream_lock(pcm);
0620             }
0621         }
0622     }
0623 }
0624 
0625 static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
0626             bool sched_irq)
0627 {
0628     int err;
0629 
0630     params->interrupt = sched_irq;
0631     params->tag = s->tag;
0632     params->sy = 0;
0633 
0634     err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
0635                    s->buffer.packets[s->packet_index].offset);
0636     if (err < 0) {
0637         dev_err(&s->unit->device, "queueing error: %d\n", err);
0638         goto end;
0639     }
0640 
0641     if (++s->packet_index >= s->queue_size)
0642         s->packet_index = 0;
0643 end:
0644     return err;
0645 }
0646 
0647 static inline int queue_out_packet(struct amdtp_stream *s,
0648                    struct fw_iso_packet *params, bool sched_irq)
0649 {
0650     params->skip =
0651         !!(params->header_length == 0 && params->payload_length == 0);
0652     return queue_packet(s, params, sched_irq);
0653 }
0654 
0655 static inline int queue_in_packet(struct amdtp_stream *s,
0656                   struct fw_iso_packet *params)
0657 {
0658     // Queue one packet for IR context.
0659     params->header_length = s->ctx_data.tx.ctx_header_size;
0660     params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
0661     params->skip = false;
0662     return queue_packet(s, params, false);
0663 }
0664 
0665 static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
0666             unsigned int data_block_counter, unsigned int syt)
0667 {
0668     cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
0669                 (s->data_block_quadlets << CIP_DBS_SHIFT) |
0670                 ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
0671                 data_block_counter);
0672     cip_header[1] = cpu_to_be32(CIP_EOH |
0673             ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
0674             ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
0675             (syt & CIP_SYT_MASK));
0676 }
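// A hypothetical example, assuming an AM824 stream at 48 kHz (fmt 0x10, fdf equal to
// the sfc value 2), source node id field for node 1, two quadlets per data block and
// dbc 0x40 with syt 0x1234: the first quadlet becomes 0x01020040 and the second
// 0x90021234 (EOH | fmt | fdf | syt).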
0677 
0678 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
0679                 struct fw_iso_packet *params, unsigned int header_length,
0680                 unsigned int data_blocks,
0681                 unsigned int data_block_counter,
0682                 unsigned int syt, unsigned int index)
0683 {
0684     unsigned int payload_length;
0685     __be32 *cip_header;
0686 
0687     payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
0688     params->payload_length = payload_length;
0689 
0690     if (header_length > 0) {
0691         cip_header = (__be32 *)params->header;
0692         generate_cip_header(s, cip_header, data_block_counter, syt);
0693         params->header_length = header_length;
0694     } else {
0695         cip_header = NULL;
0696     }
0697 
0698     trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
0699                data_block_counter, s->packet_index, index);
0700 }
0701 
0702 static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
0703                 unsigned int payload_length,
0704                 unsigned int *data_blocks,
0705                 unsigned int *data_block_counter, unsigned int *syt)
0706 {
0707     u32 cip_header[2];
0708     unsigned int sph;
0709     unsigned int fmt;
0710     unsigned int fdf;
0711     unsigned int dbc;
0712     bool lost;
0713 
0714     cip_header[0] = be32_to_cpu(buf[0]);
0715     cip_header[1] = be32_to_cpu(buf[1]);
0716 
0717     /*
0718      * This module supports 'Two-quadlet CIP header with SYT field'.
0719      * For convenience, also check FMT field is AM824 or not.
0720      */
0721     if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
0722          ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
0723         (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
0724         dev_info_ratelimited(&s->unit->device,
0725                 "Invalid CIP header for AMDTP: %08X:%08X\n",
0726                 cip_header[0], cip_header[1]);
0727         return -EAGAIN;
0728     }
0729 
0730     /* Check valid protocol or not. */
0731     sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
0732     fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
0733     if (sph != s->sph || fmt != s->fmt) {
0734         dev_info_ratelimited(&s->unit->device,
0735                      "Detect unexpected protocol: %08x %08x\n",
0736                      cip_header[0], cip_header[1]);
0737         return -EAGAIN;
0738     }
0739 
0740     /* Calculate data blocks */
0741     fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
0742     if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
0743         *data_blocks = 0;
0744     } else {
0745         unsigned int data_block_quadlets =
0746                 (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
0747         /* avoid division by zero */
0748         if (data_block_quadlets == 0) {
0749             dev_err(&s->unit->device,
0750                 "Detect invalid value in dbs field: %08X\n",
0751                 cip_header[0]);
0752             return -EPROTO;
0753         }
0754         if (s->flags & CIP_WRONG_DBS)
0755             data_block_quadlets = s->data_block_quadlets;
0756 
0757         *data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
0758     }
0759 
0760     /* Check data block counter continuity */
0761     dbc = cip_header[0] & CIP_DBC_MASK;
0762     if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
0763         *data_block_counter != UINT_MAX)
0764         dbc = *data_block_counter;
0765 
0766     if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
0767         *data_block_counter == UINT_MAX) {
0768         lost = false;
0769     } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
0770         lost = dbc != *data_block_counter;
0771     } else {
0772         unsigned int dbc_interval;
0773 
0774         if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
0775             dbc_interval = s->ctx_data.tx.dbc_interval;
0776         else
0777             dbc_interval = *data_blocks;
0778 
0779         lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
0780     }
0781 
0782     if (lost) {
0783         dev_err(&s->unit->device,
0784             "Detect discontinuity of CIP: %02X %02X\n",
0785             *data_block_counter, dbc);
0786         return -EIO;
0787     }
0788 
0789     *data_block_counter = dbc;
0790 
0791     if (!(s->flags & CIP_UNAWARE_SYT))
0792         *syt = cip_header[1] & CIP_SYT_MASK;
0793 
0794     return 0;
0795 }
0796 
0797 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
0798                    const __be32 *ctx_header,
0799                    unsigned int *data_blocks,
0800                    unsigned int *data_block_counter,
0801                    unsigned int *syt, unsigned int packet_index, unsigned int index)
0802 {
0803     unsigned int payload_length;
0804     const __be32 *cip_header;
0805     unsigned int cip_header_size;
0806 
0807     payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
0808 
0809     if (!(s->flags & CIP_NO_HEADER))
0810         cip_header_size = CIP_HEADER_SIZE;
0811     else
0812         cip_header_size = 0;
0813 
0814     if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
0815         dev_err(&s->unit->device,
0816             "Detect jumbo payload: %04x %04x\n",
0817             payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
0818         return -EIO;
0819     }
0820 
0821     if (cip_header_size > 0) {
0822         if (payload_length >= cip_header_size) {
0823             int err;
0824 
0825             cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
0826             err = check_cip_header(s, cip_header, payload_length - cip_header_size,
0827                            data_blocks, data_block_counter, syt);
0828             if (err < 0)
0829                 return err;
0830         } else {
0831             // Handle the cycle as one in which an empty packet arrived.
0832             cip_header = NULL;
0833             *data_blocks = 0;
0834             *syt = 0;
0835         }
0836     } else {
0837         cip_header = NULL;
0838         *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
0839         *syt = 0;
0840 
0841         if (*data_block_counter == UINT_MAX)
0842             *data_block_counter = 0;
0843     }
0844 
0845     trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
0846                *data_block_counter, packet_index, index);
0847 
0848     return 0;
0849 }
0850 
0851 // In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
0852 // the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are used to represent
0853 // it. Thus, via the Linux firewire subsystem, we can only get the 3 bits for the second.
0854 static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
0855 {
0856     u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
0857     return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
0858 }
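// For illustration: a context header timestamp of 0x6fa3 decodes to second field 3 and
// cycle field 0x0fa3 = 4003, giving 3 * 8000 + 4003 = 28003 as the cycle count
// (modulo 8 * 8000 = 64000).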
0859 
0860 static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
0861 {
0862     cycle += addend;
0863     if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
0864         cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
0865     return cycle;
0866 }
0867 
0868 static int compare_ohci_cycle_count(u32 lval, u32 rval)
0869 {
0870     if (lval == rval)
0871         return 0;
0872     else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
0873         return -1;
0874     else
0875         return 1;
0876 }
0877 
0878 // Align to the actual cycle count for the packet which is going to be scheduled.
0879 // This module queues the same number of isochronous cycles as the size of the queue
0880 // to skip isochronous cycles, therefore it's OK to just increment the cycle by
0881 // the size of the queue for the scheduled cycle.
0882 static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
0883                     unsigned int queue_size)
0884 {
0885     u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
0886     return increment_ohci_cycle_count(cycle, queue_size);
0887 }
0888 
0889 static int generate_device_pkt_descs(struct amdtp_stream *s,
0890                      struct pkt_desc *descs,
0891                      const __be32 *ctx_header,
0892                      unsigned int packets,
0893                      unsigned int *desc_count)
0894 {
0895     unsigned int next_cycle = s->next_cycle;
0896     unsigned int dbc = s->data_block_counter;
0897     unsigned int packet_index = s->packet_index;
0898     unsigned int queue_size = s->queue_size;
0899     int i;
0900     int err;
0901 
0902     *desc_count = 0;
0903     for (i = 0; i < packets; ++i) {
0904         struct pkt_desc *desc = descs + *desc_count;
0905         unsigned int cycle;
0906         bool lost;
0907         unsigned int data_blocks;
0908         unsigned int syt;
0909 
0910         cycle = compute_ohci_cycle_count(ctx_header[1]);
0911         lost = (next_cycle != cycle);
0912         if (lost) {
0913             if (s->flags & CIP_NO_HEADER) {
0914                 // Fireface skips transmission just for an isoc cycle corresponding
0915                 // to an empty packet.
0916                 unsigned int prev_cycle = next_cycle;
0917 
0918                 next_cycle = increment_ohci_cycle_count(next_cycle, 1);
0919                 lost = (next_cycle != cycle);
0920                 if (!lost) {
0921                     // Prepare a description for the skipped cycle for
0922                     // sequence replay.
0923                     desc->cycle = prev_cycle;
0924                     desc->syt = 0;
0925                     desc->data_blocks = 0;
0926                     desc->data_block_counter = dbc;
0927                     desc->ctx_payload = NULL;
0928                     ++desc;
0929                     ++(*desc_count);
0930                 }
0931             } else if (s->flags & CIP_JUMBO_PAYLOAD) {
0932                 // OXFW970 skips transmission for several isoc cycles during an
0933                 // asynchronous transaction. Sequence replay is impossible for
0934                 // this reason.
0935                 unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
0936                                 IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
0937                 lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
0938             }
0939             if (lost) {
0940                 dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
0941                     next_cycle, cycle);
0942                 return -EIO;
0943             }
0944         }
0945 
0946         err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
0947                       packet_index, i);
0948         if (err < 0)
0949             return err;
0950 
0951         desc->cycle = cycle;
0952         desc->syt = syt;
0953         desc->data_blocks = data_blocks;
0954         desc->data_block_counter = dbc;
0955         desc->ctx_payload = s->buffer.packets[packet_index].buffer;
0956 
0957         if (!(s->flags & CIP_DBC_IS_END_EVENT))
0958             dbc = (dbc + desc->data_blocks) & 0xff;
0959 
0960         next_cycle = increment_ohci_cycle_count(next_cycle, 1);
0961         ++(*desc_count);
0962         ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
0963         packet_index = (packet_index + 1) % queue_size;
0964     }
0965 
0966     s->next_cycle = next_cycle;
0967     s->data_block_counter = dbc;
0968 
0969     return 0;
0970 }
0971 
0972 static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
0973                 unsigned int transfer_delay)
0974 {
0975     unsigned int syt;
0976 
0977     syt_offset += transfer_delay;
0978     syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
0979           (syt_offset % TICKS_PER_CYCLE);
0980     return syt & CIP_SYT_MASK;
0981 }
0982 
0983 static void generate_pkt_descs(struct amdtp_stream *s, const __be32 *ctx_header, unsigned int packets)
0984 {
0985     struct pkt_desc *descs = s->pkt_descs;
0986     const struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
0987     const unsigned int seq_size = s->ctx_data.rx.seq.size;
0988     unsigned int dbc = s->data_block_counter;
0989     unsigned int seq_head = s->ctx_data.rx.seq.head;
0990     bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
0991     int i;
0992 
0993     for (i = 0; i < packets; ++i) {
0994         struct pkt_desc *desc = descs + i;
0995         unsigned int index = (s->packet_index + i) % s->queue_size;
0996         const struct seq_desc *seq = seq_descs + seq_head;
0997 
0998         desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
0999 
1000         if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
1001             desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
1002         else
1003             desc->syt = CIP_SYT_NO_INFO;
1004 
1005         desc->data_blocks = seq->data_blocks;
1006 
1007         if (s->flags & CIP_DBC_IS_END_EVENT)
1008             dbc = (dbc + desc->data_blocks) & 0xff;
1009 
1010         desc->data_block_counter = dbc;
1011 
1012         if (!(s->flags & CIP_DBC_IS_END_EVENT))
1013             dbc = (dbc + desc->data_blocks) & 0xff;
1014 
1015         desc->ctx_payload = s->buffer.packets[index].buffer;
1016 
1017         seq_head = (seq_head + 1) % seq_size;
1018 
1019         ++ctx_header;
1020     }
1021 
1022     s->data_block_counter = dbc;
1023     s->ctx_data.rx.seq.head = seq_head;
1024 }
1025 
1026 static inline void cancel_stream(struct amdtp_stream *s)
1027 {
1028     s->packet_index = -1;
1029     if (in_softirq())
1030         amdtp_stream_pcm_abort(s);
1031     WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
1032 }
1033 
1034 static void process_ctx_payloads(struct amdtp_stream *s,
1035                  const struct pkt_desc *descs,
1036                  unsigned int packets)
1037 {
1038     struct snd_pcm_substream *pcm;
1039     unsigned int pcm_frames;
1040 
1041     pcm = READ_ONCE(s->pcm);
1042     pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
1043     if (pcm)
1044         update_pcm_pointers(s, pcm, pcm_frames);
1045 }
1046 
1047 static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1048                    void *header, void *private_data)
1049 {
1050     struct amdtp_stream *s = private_data;
1051     const struct amdtp_domain *d = s->domain;
1052     const __be32 *ctx_header = header;
1053     const unsigned int events_per_period = d->events_per_period;
1054     unsigned int event_count = s->ctx_data.rx.event_count;
1055     unsigned int pkt_header_length;
1056     unsigned int packets;
1057     bool need_hw_irq;
1058     int i;
1059 
1060     if (s->packet_index < 0)
1061         return;
1062 
1063     // Calculate the number of packets in buffer and check XRUN.
1064     packets = header_length / sizeof(*ctx_header);
1065 
1066     pool_seq_descs(s, packets);
1067 
1068     generate_pkt_descs(s, ctx_header, packets);
1069 
1070     process_ctx_payloads(s, s->pkt_descs, packets);
1071 
1072     if (!(s->flags & CIP_NO_HEADER))
1073         pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
1074     else
1075         pkt_header_length = 0;
1076 
1077     if (s == d->irq_target) {
1078         // In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
1079         // tasks of the user process operating the ALSA PCM character device via ioctl(2)
1080         // requests, instead of by the scheduled hardware IRQ of an IT context.
1081         struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
1082         need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
1083     } else {
1084         need_hw_irq = false;
1085     }
1086 
1087     for (i = 0; i < packets; ++i) {
1088         const struct pkt_desc *desc = s->pkt_descs + i;
1089         struct {
1090             struct fw_iso_packet params;
1091             __be32 header[CIP_HEADER_QUADLETS];
1092         } template = { {0}, {0} };
1093         bool sched_irq = false;
1094 
1095         build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
1096                     desc->data_blocks, desc->data_block_counter,
1097                     desc->syt, i);
1098 
1099         if (s == s->domain->irq_target) {
1100             event_count += desc->data_blocks;
1101             if (event_count >= events_per_period) {
1102                 event_count -= events_per_period;
1103                 sched_irq = need_hw_irq;
1104             }
1105         }
1106 
1107         if (queue_out_packet(s, &template.params, sched_irq) < 0) {
1108             cancel_stream(s);
1109             return;
1110         }
1111     }
1112 
1113     s->ctx_data.rx.event_count = event_count;
1114 }
1115 
1116 static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1117                 void *header, void *private_data)
1118 {
1119     struct amdtp_stream *s = private_data;
1120     struct amdtp_domain *d = s->domain;
1121     const __be32 *ctx_header = header;
1122     unsigned int packets;
1123     unsigned int cycle;
1124     int i;
1125 
1126     if (s->packet_index < 0)
1127         return;
1128 
1129     packets = header_length / sizeof(*ctx_header);
1130 
1131     cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
1132     s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1133 
1134     for (i = 0; i < packets; ++i) {
1135         struct fw_iso_packet params = {
1136             .header_length = 0,
1137             .payload_length = 0,
1138         };
1139         bool sched_irq = (s == d->irq_target && i == packets - 1);
1140 
1141         if (queue_out_packet(s, &params, sched_irq) < 0) {
1142             cancel_stream(s);
1143             return;
1144         }
1145     }
1146 }
1147 
1148 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1149                 void *header, void *private_data);
1150 
1151 static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1152                     size_t header_length, void *header, void *private_data)
1153 {
1154     struct amdtp_stream *s = private_data;
1155     struct amdtp_domain *d = s->domain;
1156     __be32 *ctx_header = header;
1157     const unsigned int queue_size = s->queue_size;
1158     unsigned int packets;
1159     unsigned int offset;
1160 
1161     if (s->packet_index < 0)
1162         return;
1163 
1164     packets = header_length / sizeof(*ctx_header);
1165 
1166     offset = 0;
1167     while (offset < packets) {
1168         unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
1169 
1170         if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
1171             break;
1172 
1173         ++offset;
1174     }
1175 
1176     if (offset > 0) {
1177         unsigned int length = sizeof(*ctx_header) * offset;
1178 
1179         skip_rx_packets(context, tstamp, length, ctx_header, private_data);
1180         if (amdtp_streaming_error(s))
1181             return;
1182 
1183         ctx_header += offset;
1184         header_length -= length;
1185     }
1186 
1187     if (offset < packets) {
1188         s->ready_processing = true;
1189         wake_up(&s->ready_wait);
1190 
1191         process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
1192         if (amdtp_streaming_error(s))
1193             return;
1194 
1195         if (s == d->irq_target)
1196             s->context->callback.sc = irq_target_callback;
1197         else
1198             s->context->callback.sc = process_rx_packets;
1199     }
1200 }
1201 
1202 static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1203                    void *header, void *private_data)
1204 {
1205     struct amdtp_stream *s = private_data;
1206     __be32 *ctx_header = header;
1207     unsigned int packets;
1208     unsigned int desc_count;
1209     int i;
1210     int err;
1211 
1212     if (s->packet_index < 0)
1213         return;
1214 
1215     // Calculate the number of packets in buffer and check XRUN.
1216     packets = header_length / s->ctx_data.tx.ctx_header_size;
1217 
1218     desc_count = 0;
1219     err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets, &desc_count);
1220     if (err < 0) {
1221         if (err != -EAGAIN) {
1222             cancel_stream(s);
1223             return;
1224         }
1225     } else {
1226         struct amdtp_domain *d = s->domain;
1227 
1228         process_ctx_payloads(s, s->pkt_descs, desc_count);
1229 
1230         if (d->replay.enable)
1231             cache_seq(s, s->pkt_descs, desc_count);
1232     }
1233 
1234     for (i = 0; i < packets; ++i) {
1235         struct fw_iso_packet params = {0};
1236 
1237         if (queue_in_packet(s, &params) < 0) {
1238             cancel_stream(s);
1239             return;
1240         }
1241     }
1242 }
1243 
1244 static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1245                 void *header, void *private_data)
1246 {
1247     struct amdtp_stream *s = private_data;
1248     const __be32 *ctx_header = header;
1249     unsigned int packets;
1250     unsigned int cycle;
1251     int i;
1252 
1253     if (s->packet_index < 0)
1254         return;
1255 
1256     packets = header_length / s->ctx_data.tx.ctx_header_size;
1257 
1258     ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
1259     cycle = compute_ohci_cycle_count(ctx_header[1]);
1260     s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1261 
1262     for (i = 0; i < packets; ++i) {
1263         struct fw_iso_packet params = {0};
1264 
1265         if (queue_in_packet(s, &params) < 0) {
1266             cancel_stream(s);
1267             return;
1268         }
1269     }
1270 }
1271 
1272 static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1273                     size_t header_length, void *header, void *private_data)
1274 {
1275     struct amdtp_stream *s = private_data;
1276     struct amdtp_domain *d = s->domain;
1277     __be32 *ctx_header;
1278     unsigned int packets;
1279     unsigned int offset;
1280 
1281     if (s->packet_index < 0)
1282         return;
1283 
1284     packets = header_length / s->ctx_data.tx.ctx_header_size;
1285 
1286     offset = 0;
1287     ctx_header = header;
1288     while (offset < packets) {
1289         unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);
1290 
1291         if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
1292             break;
1293 
1294         ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1295         ++offset;
1296     }
1297 
1298     ctx_header = header;
1299 
1300     if (offset > 0) {
1301         size_t length = s->ctx_data.tx.ctx_header_size * offset;
1302 
1303         drop_tx_packets(context, tstamp, length, ctx_header, s);
1304         if (amdtp_streaming_error(s))
1305             return;
1306 
1307         ctx_header += length / sizeof(*ctx_header);
1308         header_length -= length;
1309     }
1310 
1311     if (offset < packets) {
1312         s->ready_processing = true;
1313         wake_up(&s->ready_wait);
1314 
1315         process_tx_packets(context, tstamp, header_length, ctx_header, s);
1316         if (amdtp_streaming_error(s))
1317             return;
1318 
1319         context->callback.sc = process_tx_packets;
1320     }
1321 }
1322 
1323 static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
1324                       size_t header_length, void *header, void *private_data)
1325 {
1326     struct amdtp_stream *s = private_data;
1327     struct amdtp_domain *d = s->domain;
1328     __be32 *ctx_header;
1329     unsigned int count;
1330     unsigned int events;
1331     int i;
1332 
1333     if (s->packet_index < 0)
1334         return;
1335 
1336     count = header_length / s->ctx_data.tx.ctx_header_size;
1337 
1338     // Attempt to detect any event in the batch of packets.
1339     events = 0;
1340     ctx_header = header;
1341     for (i = 0; i < count; ++i) {
1342         unsigned int payload_quads =
1343             (be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
1344         unsigned int data_blocks;
1345 
1346         if (s->flags & CIP_NO_HEADER) {
1347             data_blocks = payload_quads / s->data_block_quadlets;
1348         } else {
1349             __be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
1350 
1351             if (payload_quads < CIP_HEADER_QUADLETS) {
1352                 data_blocks = 0;
1353             } else {
1354                 payload_quads -= CIP_HEADER_QUADLETS;
1355 
1356                 if (s->flags & CIP_UNAWARE_SYT) {
1357                     data_blocks = payload_quads / s->data_block_quadlets;
1358                 } else {
1359                     u32 cip1 = be32_to_cpu(cip_headers[1]);
1360 
1361                     // A NODATA packet can include data blocks, but they are
1362                     // not available as events.
1363                     if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
1364                         data_blocks = 0;
1365                     else
1366                         data_blocks = payload_quads / s->data_block_quadlets;
1367                 }
1368             }
1369         }
1370 
1371         events += data_blocks;
1372 
1373         ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1374     }
1375 
1376     drop_tx_packets(context, tstamp, header_length, header, s);
1377 
1378     if (events > 0)
1379         s->ctx_data.tx.event_starts = true;
1380 
1381     // Decide the cycle count to begin processing packet content in IR contexts.
1382     {
1383         unsigned int stream_count = 0;
1384         unsigned int event_starts_count = 0;
1385         unsigned int cycle = UINT_MAX;
1386 
1387         list_for_each_entry(s, &d->streams, list) {
1388             if (s->direction == AMDTP_IN_STREAM) {
1389                 ++stream_count;
1390                 if (s->ctx_data.tx.event_starts)
1391                     ++event_starts_count;
1392             }
1393         }
1394 
1395         if (stream_count == event_starts_count) {
1396             unsigned int next_cycle;
1397 
1398             list_for_each_entry(s, &d->streams, list) {
1399                 if (s->direction != AMDTP_IN_STREAM)
1400                     continue;
1401 
1402                 next_cycle = increment_ohci_cycle_count(s->next_cycle,
1403                                 d->processing_cycle.tx_init_skip);
1404                 if (cycle == UINT_MAX ||
1405                     compare_ohci_cycle_count(next_cycle, cycle) > 0)
1406                     cycle = next_cycle;
1407 
1408                 s->context->callback.sc = process_tx_packets_intermediately;
1409             }
1410 
1411             d->processing_cycle.tx_start = cycle;
1412         }
1413     }
1414 }
1415 
1416 static void process_ctxs_in_domain(struct amdtp_domain *d)
1417 {
1418     struct amdtp_stream *s;
1419 
1420     list_for_each_entry(s, &d->streams, list) {
1421         if (s != d->irq_target && amdtp_stream_running(s))
1422             fw_iso_context_flush_completions(s->context);
1423 
1424         if (amdtp_streaming_error(s))
1425             goto error;
1426     }
1427 
1428     return;
1429 error:
1430     if (amdtp_stream_running(d->irq_target))
1431         cancel_stream(d->irq_target);
1432 
1433     list_for_each_entry(s, &d->streams, list) {
1434         if (amdtp_stream_running(s))
1435             cancel_stream(s);
1436     }
1437 }
1438 
1439 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1440                 void *header, void *private_data)
1441 {
1442     struct amdtp_stream *s = private_data;
1443     struct amdtp_domain *d = s->domain;
1444 
1445     process_rx_packets(context, tstamp, header_length, header, private_data);
1446     process_ctxs_in_domain(d);
1447 }
1448 
1449 static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
1450                     size_t header_length, void *header, void *private_data)
1451 {
1452     struct amdtp_stream *s = private_data;
1453     struct amdtp_domain *d = s->domain;
1454 
1455     process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
1456     process_ctxs_in_domain(d);
1457 }
1458 
1459 static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
1460                      size_t header_length, void *header, void *private_data)
1461 {
1462     struct amdtp_stream *s = private_data;
1463     struct amdtp_domain *d = s->domain;
1464     bool ready_to_start;
1465 
1466     skip_rx_packets(context, tstamp, header_length, header, private_data);
1467     process_ctxs_in_domain(d);
1468 
1469     if (d->replay.enable && !d->replay.on_the_fly) {
1470         unsigned int rx_count = 0;
1471         unsigned int rx_ready_count = 0;
1472         struct amdtp_stream *rx;
1473 
1474         list_for_each_entry(rx, &d->streams, list) {
1475             struct amdtp_stream *tx;
1476             unsigned int cached_cycles;
1477 
1478             if (rx->direction != AMDTP_OUT_STREAM)
1479                 continue;
1480             ++rx_count;
1481 
1482             tx = rx->ctx_data.rx.replay_target;
1483             cached_cycles = calculate_cached_cycle_count(tx, 0);
1484             if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
1485                 ++rx_ready_count;
1486         }
1487 
1488         ready_to_start = (rx_count == rx_ready_count);
1489     } else {
1490         ready_to_start = true;
1491     }
1492 
1493     // Decide the cycle count to begin processing packet content in IT contexts. All IT
1494     // contexts are expected to have started and received a callback by the time this runs.
1495     if (ready_to_start) {
1496         unsigned int cycle = s->next_cycle;
1497         list_for_each_entry(s, &d->streams, list) {
1498             if (s->direction != AMDTP_OUT_STREAM)
1499                 continue;
1500 
1501             if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
1502                 cycle = s->next_cycle;
1503 
1504             if (s == d->irq_target)
1505                 s->context->callback.sc = irq_target_callback_intermediately;
1506             else
1507                 s->context->callback.sc = process_rx_packets_intermediately;
1508         }
1509 
1510         d->processing_cycle.rx_start = cycle;
1511     }
1512 }
1513 
1514 // This is executed only once. For an in-stream, the first packet has arrived. For an out-stream, the
1515 // context is ready to transmit its first packet.
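     // The isochronous context callbacks transition as follows:
     //   IR context:                  amdtp_stream_first_callback -> drop_tx_packets_initially ->
     //                                process_tx_packets_intermediately -> (steady-state processing)
     //   IT context (non IRQ target): amdtp_stream_first_callback -> skip_rx_packets ->
     //                                process_rx_packets_intermediately -> (steady-state processing)
     //   IT context (IRQ target):     amdtp_stream_first_callback -> irq_target_callback_skip ->
     //                                irq_target_callback_intermediately -> irq_target_callback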
1516 static void amdtp_stream_first_callback(struct fw_iso_context *context,
1517                     u32 tstamp, size_t header_length,
1518                     void *header, void *private_data)
1519 {
1520     struct amdtp_stream *s = private_data;
1521     struct amdtp_domain *d = s->domain;
1522 
1523     if (s->direction == AMDTP_IN_STREAM) {
1524         context->callback.sc = drop_tx_packets_initially;
1525     } else {
1526         if (s == d->irq_target)
1527             context->callback.sc = irq_target_callback_skip;
1528         else
1529             context->callback.sc = skip_rx_packets;
1530     }
1531 
1532     context->callback.sc(context, tstamp, header_length, header, s);
1533 }
1534 
1535 /**
1536  * amdtp_stream_start - start transferring packets
1537  * @s: the AMDTP stream to start
1538  * @channel: the isochronous channel on the bus
1539  * @speed: firewire speed code
1540  * @queue_size: the number of packets in the queue.
1541  * @idle_irq_interval: the interval, in packets, at which to schedule a hardware IRQ while queueing the
1542  *            initial packets.
1542  *
1543  * The stream cannot be started until it has been configured with
1544  * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
1545  * device can be started.
1546  */
1547 static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
1548                   unsigned int queue_size, unsigned int idle_irq_interval)
1549 {
1550     bool is_irq_target = (s == s->domain->irq_target);
1551     unsigned int ctx_header_size;
1552     unsigned int max_ctx_payload_size;
1553     enum dma_data_direction dir;
1554     int type, tag, err;
1555 
1556     mutex_lock(&s->mutex);
1557 
1558     if (WARN_ON(amdtp_stream_running(s) ||
1559             (s->data_block_quadlets < 1))) {
1560         err = -EBADFD;
1561         goto err_unlock;
1562     }
1563 
1564     if (s->direction == AMDTP_IN_STREAM) {
1565         // NOTE: an IT context should be used as the IRQ target so that the IRQ interval is constant.
1566         if (is_irq_target) {
1567             err = -EINVAL;
1568             goto err_unlock;
1569         }
1570 
1571         s->data_block_counter = UINT_MAX;
1572     } else {
1573         s->data_block_counter = 0;
1574     }
1575 
1576     // initialize packet buffer.
1577     if (s->direction == AMDTP_IN_STREAM) {
1578         dir = DMA_FROM_DEVICE;
1579         type = FW_ISO_CONTEXT_RECEIVE;
1580         if (!(s->flags & CIP_NO_HEADER))
1581             ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
1582         else
1583             ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
1584     } else {
1585         dir = DMA_TO_DEVICE;
1586         type = FW_ISO_CONTEXT_TRANSMIT;
1587         ctx_header_size = 0;    // No effect for IT context.
1588     }
1589     max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
1590 
1591     err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
1592     if (err < 0)
1593         goto err_unlock;
1594     s->queue_size = queue_size;
1595 
1596     s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
1597                       type, channel, speed, ctx_header_size,
1598                       amdtp_stream_first_callback, s);
1599     if (IS_ERR(s->context)) {
1600         err = PTR_ERR(s->context);
1601         if (err == -EBUSY)
1602             dev_err(&s->unit->device,
1603                 "no free stream on this controller\n");
1604         goto err_buffer;
1605     }
1606 
1607     amdtp_stream_update(s);
1608 
1609     if (s->direction == AMDTP_IN_STREAM) {
1610         s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
1611         s->ctx_data.tx.ctx_header_size = ctx_header_size;
1612         s->ctx_data.tx.event_starts = false;
1613 
1614         if (s->domain->replay.enable) {
1615             // struct fw_iso_context.drop_overflow_headers is false, therefore it's
1616             // possible that unexpectedly many packets get cached.
1617             s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
1618                               queue_size * 3 / 2);
1619             s->ctx_data.tx.cache.tail = 0;
1620             s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
1621                         sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
1622             if (!s->ctx_data.tx.cache.descs) {
1623                 err = -ENOMEM;
1624                 goto err_context;
1625             }
1626         }
1627     } else {
1628         static const struct {
1629             unsigned int data_block;
1630             unsigned int syt_offset;
1631         } *entry, initial_state[] = {
1632             [CIP_SFC_32000]  = {  4, 3072 },
1633             [CIP_SFC_48000]  = {  6, 1024 },
1634             [CIP_SFC_96000]  = { 12, 1024 },
1635             [CIP_SFC_192000] = { 24, 1024 },
1636             [CIP_SFC_44100]  = {  0,   67 },
1637             [CIP_SFC_88200]  = {  0,   67 },
1638             [CIP_SFC_176400] = {  0,   67 },
1639         };
1640 
1641         s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
1642         if (!s->ctx_data.rx.seq.descs) {
1643             err = -ENOMEM;
1644             goto err_context;
1645         }
1646         s->ctx_data.rx.seq.size = queue_size;
1647         s->ctx_data.rx.seq.tail = 0;
1648         s->ctx_data.rx.seq.head = 0;
1649 
1650         entry = &initial_state[s->sfc];
1651         s->ctx_data.rx.data_block_state = entry->data_block;
1652         s->ctx_data.rx.syt_offset_state = entry->syt_offset;
1653         s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
1654 
1655         s->ctx_data.rx.event_count = 0;
1656     }
1657 
1658     if (s->flags & CIP_NO_HEADER)
1659         s->tag = TAG_NO_CIP_HEADER;
1660     else
1661         s->tag = TAG_CIP;
1662 
1663     s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
1664                    GFP_KERNEL);
1665     if (!s->pkt_descs) {
1666         err = -ENOMEM;
1667         goto err_context;
1668     }
1669 
1670     s->packet_index = 0;
1671     do {
1672         struct fw_iso_packet params;
1673 
1674         if (s->direction == AMDTP_IN_STREAM) {
1675             err = queue_in_packet(s, &params);
1676         } else {
1677             bool sched_irq = false;
1678 
1679             params.header_length = 0;
1680             params.payload_length = 0;
1681 
1682             if (is_irq_target) {
1683                 sched_irq = !((s->packet_index + 1) %
1684                           idle_irq_interval);
1685             }
1686 
1687             err = queue_out_packet(s, &params, sched_irq);
1688         }
1689         if (err < 0)
1690             goto err_pkt_descs;
1691     } while (s->packet_index > 0);
1692 
1693     /* NOTE: TAG1 matches CIP. This only affects the in-stream. */
1694     tag = FW_ISO_CONTEXT_MATCH_TAG1;
1695     if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
1696         tag |= FW_ISO_CONTEXT_MATCH_TAG0;
1697 
1698     s->ready_processing = false;
1699     err = fw_iso_context_start(s->context, -1, 0, tag);
1700     if (err < 0)
1701         goto err_pkt_descs;
1702 
1703     mutex_unlock(&s->mutex);
1704 
1705     return 0;
1706 err_pkt_descs:
1707     kfree(s->pkt_descs);
1708 err_context:
1709     if (s->direction == AMDTP_OUT_STREAM) {
1710         kfree(s->ctx_data.rx.seq.descs);
1711     } else {
1712         if (s->domain->replay.enable)
1713             kfree(s->ctx_data.tx.cache.descs);
1714     }
1715     fw_iso_context_destroy(s->context);
1716     s->context = ERR_PTR(-1);
1717 err_buffer:
1718     iso_packets_buffer_destroy(&s->buffer, s->unit);
1719 err_unlock:
1720     mutex_unlock(&s->mutex);
1721 
1722     return err;
1723 }
1724 
1725 /**
1726  * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
1727  * @d: the AMDTP domain.
1728  * @s: the AMDTP stream that transports the PCM data
1729  *
1730  * Returns the current buffer position, in frames.
1731  */
1732 unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
1733                           struct amdtp_stream *s)
1734 {
1735     struct amdtp_stream *irq_target = d->irq_target;
1736 
1737     // Process isochronous packets queued up to the most recent isochronous cycle to handle PCM frames.
1738     if (irq_target && amdtp_stream_running(irq_target)) {
1739         // In software IRQ context, the call would dead-lock because it disables the tasklet
1740         // synchronously.
1741         if (!in_softirq())
1742             fw_iso_context_flush_completions(irq_target->context);
1743     }
1744 
1745     return READ_ONCE(s->pcm_buffer_pointer);
1746 }
1747 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
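     // A minimal sketch, assuming a hypothetical unit driver, of how a PCM .pointer callback typically
     // uses this helper; the struct example_unit type and its members are illustrative stand-ins.
     //
     // static snd_pcm_uframes_t example_pcm_pointer(struct snd_pcm_substream *substream)
     // {
     //      struct example_unit *unit = substream->private_data;
     //
     //      return amdtp_domain_stream_pcm_pointer(&unit->domain, &unit->playback_stream);
     // }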
1748 
1749 /**
1750     // Process isochronous packets up to the most recent isochronous cycle to handle
1751     // queued PCM frames.
1752  * @s: the AMDTP stream that transfers the PCM frames
1753  *
1754  * Always returns zero.
1755  */
1756 int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
1757 {
1758     struct amdtp_stream *irq_target = d->irq_target;
1759 
1760     // Process isochronous packets for recent isochronous cycle to handle
1761     // queued PCM frames.
1762     if (irq_target && amdtp_stream_running(irq_target))
1763         fw_iso_context_flush_completions(irq_target->context);
1764 
1765     return 0;
1766 }
1767 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
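     // Likewise, a sketch of a hypothetical PCM .ack callback which flushes completed packets via this
     // helper; example_unit and its members are illustrative stand-ins.
     //
     // static int example_pcm_ack(struct snd_pcm_substream *substream)
     // {
     //      struct example_unit *unit = substream->private_data;
     //
     //      return amdtp_domain_stream_pcm_ack(&unit->domain, &unit->playback_stream);
     // }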
1768 
1769 /**
1770  * amdtp_stream_update - update the stream after a bus reset
1771  * @s: the AMDTP stream
1772  */
1773 void amdtp_stream_update(struct amdtp_stream *s)
1774 {
1775     /* Precompute the source node ID field of the CIP header. */
1776     WRITE_ONCE(s->source_node_id_field,
1777                    (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
1778 }
1779 EXPORT_SYMBOL(amdtp_stream_update);
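     // A brief sketch (hypothetical driver glue) of a bus-reset update path; the source node ID encoded
     // into outgoing CIP headers may change after a bus reset, so each stream is updated:
     //
     // static void example_bus_update(struct example_unit *unit)
     // {
     //      amdtp_stream_update(&unit->playback_stream);
     //      amdtp_stream_update(&unit->capture_stream);
     // }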
1780 
1781 /**
1782  * amdtp_stream_stop - stop sending packets
1783  * @s: the AMDTP stream to stop
1784  *
1785  * All PCM and MIDI devices of the stream must be stopped before the stream
1786  * itself can be stopped.
1787  */
1788 static void amdtp_stream_stop(struct amdtp_stream *s)
1789 {
1790     mutex_lock(&s->mutex);
1791 
1792     if (!amdtp_stream_running(s)) {
1793         mutex_unlock(&s->mutex);
1794         return;
1795     }
1796 
1797     fw_iso_context_stop(s->context);
1798     fw_iso_context_destroy(s->context);
1799     s->context = ERR_PTR(-1);
1800     iso_packets_buffer_destroy(&s->buffer, s->unit);
1801     kfree(s->pkt_descs);
1802 
1803     if (s->direction == AMDTP_OUT_STREAM) {
1804         kfree(s->ctx_data.rx.seq.descs);
1805     } else {
1806         if (s->domain->replay.enable)
1807             kfree(s->ctx_data.tx.cache.descs);
1808     }
1809 
1810     mutex_unlock(&s->mutex);
1811 }
1812 
1813 /**
1814  * amdtp_stream_pcm_abort - abort the running PCM device
1815  * @s: the AMDTP stream about to be stopped
1816  *
1817  * If the isochronous stream needs to be stopped asynchronously, call this
1818  * function first to stop the PCM device.
1819  */
1820 void amdtp_stream_pcm_abort(struct amdtp_stream *s)
1821 {
1822     struct snd_pcm_substream *pcm;
1823 
1824     pcm = READ_ONCE(s->pcm);
1825     if (pcm)
1826         snd_pcm_stop_xrun(pcm);
1827 }
1828 EXPORT_SYMBOL(amdtp_stream_pcm_abort);
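     // A sketch (hypothetical names) of an asynchronous error path: abort the PCM substreams first, then
     // stop the isochronous contexts from a context where sleeping is allowed:
     //
     // static void example_handle_error(struct example_unit *unit)
     // {
     //      amdtp_stream_pcm_abort(&unit->playback_stream);
     //      amdtp_stream_pcm_abort(&unit->capture_stream);
     //      // e.g. schedule a work item which eventually calls amdtp_domain_stop().
     // }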
1829 
1830 /**
1831  * amdtp_domain_init - initialize an AMDTP domain structure
1832  * @d: the AMDTP domain to initialize.
1833  */
1834 int amdtp_domain_init(struct amdtp_domain *d)
1835 {
1836     INIT_LIST_HEAD(&d->streams);
1837 
1838     d->events_per_period = 0;
1839 
1840     return 0;
1841 }
1842 EXPORT_SYMBOL_GPL(amdtp_domain_init);
1843 
1844 /**
1845  * amdtp_domain_destroy - destroy an AMDTP domain structure
1846  * @d: the AMDTP domain to destroy.
1847  */
1848 void amdtp_domain_destroy(struct amdtp_domain *d)
1849 {
1850     // At present nothing to do.
1851     return;
1852 }
1853 EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
1854 
1855 /**
1856  * amdtp_domain_add_stream - register an isoc context with the domain.
1857  * @d: the AMDTP domain.
1858  * @s: the AMDTP stream.
1859  * @channel: the isochronous channel on the bus.
1860  * @speed: firewire speed code.
1861  */
1862 int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
1863                 int channel, int speed)
1864 {
1865     struct amdtp_stream *tmp;
1866 
1867     list_for_each_entry(tmp, &d->streams, list) {
1868         if (s == tmp)
1869             return -EBUSY;
1870     }
1871 
1872     list_add(&s->list, &d->streams);
1873 
1874     s->channel = channel;
1875     s->speed = speed;
1876     s->domain = d;
1877 
1878     return 0;
1879 }
1880 EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
1881 
1882 // Make the reference from each rx stream to a tx stream for sequence replay. When the number of tx
1883 // streams is less than the number of rx streams, the remaining rx streams refer to the first tx stream.
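     // For example, with three rx streams and two tx streams in the domain, the rx streams are associated
     // index-wise in list order: the first rx to the first tx, the second rx to the second tx, and the
     // third rx falls back to the first tx.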
1884 static int make_association(struct amdtp_domain *d)
1885 {
1886     unsigned int dst_index = 0;
1887     struct amdtp_stream *rx;
1888 
1889     // Make association to replay target.
1890     list_for_each_entry(rx, &d->streams, list) {
1891         if (rx->direction == AMDTP_OUT_STREAM) {
1892             unsigned int src_index = 0;
1893             struct amdtp_stream *tx = NULL;
1894             struct amdtp_stream *s;
1895 
1896             list_for_each_entry(s, &d->streams, list) {
1897                 if (s->direction == AMDTP_IN_STREAM) {
1898                     if (dst_index == src_index) {
1899                         tx = s;
1900                         break;
1901                     }
1902 
1903                     ++src_index;
1904                 }
1905             }
1906             if (!tx) {
1907                 // Select the first entry.
1908                 list_for_each_entry(s, &d->streams, list) {
1909                     if (s->direction == AMDTP_IN_STREAM) {
1910                         tx = s;
1911                         break;
1912                     }
1913                 }
1914                 // No target is available to replay sequence.
1915                 if (!tx)
1916                     return -EINVAL;
1917             }
1918 
1919             rx->ctx_data.rx.replay_target = tx;
1920             rx->ctx_data.rx.cache_head = 0;
1921 
1922             ++dst_index;
1923         }
1924     }
1925 
1926     return 0;
1927 }
1928 
1929 /**
1930  * amdtp_domain_start - start sending packets for the isoc contexts in the domain.
1931  * @d: the AMDTP domain.
1932  * @tx_init_skip_cycles: the number of cycles for which to skip processing packets at the initial stage
1933  *           of the IR contexts.
1934  * @replay_seq: whether to replay the sequence of packets in the IR contexts for the sequence of packets
1935  *      in the IT contexts.
1936  * @replay_on_the_fly: transfer rx packets according to the nominal frequency at first, then begin to
1937  *             replay according to the arrival of events in tx packets.
1938  */
1939 int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
1940                bool replay_on_the_fly)
1941 {
1942     unsigned int events_per_buffer = d->events_per_buffer;
1943     unsigned int events_per_period = d->events_per_period;
1944     unsigned int queue_size;
1945     struct amdtp_stream *s;
1946     bool found = false;
1947     int err;
1948 
1949     if (replay_seq) {
1950         err = make_association(d);
1951         if (err < 0)
1952             return err;
1953     }
1954     d->replay.enable = replay_seq;
1955     d->replay.on_the_fly = replay_on_the_fly;
1956 
1957     // Select an IT context as IRQ target.
1958     list_for_each_entry(s, &d->streams, list) {
1959         if (s->direction == AMDTP_OUT_STREAM) {
1960             found = true;
1961             break;
1962         }
1963     }
1964     if (!found)
1965         return -ENXIO;
1966     d->irq_target = s;
1967 
1968     d->processing_cycle.tx_init_skip = tx_init_skip_cycles;
1969 
1970     // This is the case in which the AMDTP streams in the domain run just for a MIDI
1971     // substream. Use the number of events equivalent to 10 msec as the
1972     // interval of the hardware IRQ.
1973     if (events_per_period == 0)
1974         events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
1975     if (events_per_buffer == 0)
1976         events_per_buffer = events_per_period * 3;
1977 
1978     queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
1979                   amdtp_rate_table[d->irq_target->sfc]);
1980 
1981     list_for_each_entry(s, &d->streams, list) {
1982         unsigned int idle_irq_interval = 0;
1983 
1984         if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
1985             idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
1986                              amdtp_rate_table[d->irq_target->sfc]);
1987         }
1988 
1989         // Start immediately; in practice the DMA context starts several hundred cycles later.
1990         err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
1991         if (err < 0)
1992             goto error;
1993     }
1994 
1995     return 0;
1996 error:
1997     list_for_each_entry(s, &d->streams, list)
1998         amdtp_stream_stop(s);
1999     return err;
2000 }
2001 EXPORT_SYMBOL_GPL(amdtp_domain_start);
2002 
2003 /**
2004  * amdtp_domain_stop - stop sending packets for the isoc contexts in the domain.
2005  * @d: the AMDTP domain to which the isoc contexts belong.
2006  */
2007 void amdtp_domain_stop(struct amdtp_domain *d)
2008 {
2009     struct amdtp_stream *s, *next;
2010 
2011     if (d->irq_target)
2012         amdtp_stream_stop(d->irq_target);
2013 
2014     list_for_each_entry_safe(s, next, &d->streams, list) {
2015         list_del(&s->list);
2016 
2017         if (s != d->irq_target)
2018             amdtp_stream_stop(s);
2019     }
2020 
2021     d->events_per_period = 0;
2022     d->irq_target = NULL;
2023 }
2024 EXPORT_SYMBOL_GPL(amdtp_domain_stop);
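     // A condensed sketch, with hypothetical driver glue (struct example_unit, channel numbers), of the
     // usual lifecycle around the domain helpers above, after amdtp_domain_init() has been called. A real
     // driver also initializes each stream with amdtp_stream_init(), configures it with
     // amdtp_stream_set_parameters(), and sets the domain's events per period and per buffer before
     // starting.
     //
     // static int example_start_duplex(struct example_unit *unit, int tx_channel, int rx_channel)
     // {
     //      int err;
     //
     //      err = amdtp_domain_add_stream(&unit->domain, &unit->capture_stream, tx_channel, SCODE_400);
     //      if (err < 0)
     //              return err;
     //
     //      err = amdtp_domain_add_stream(&unit->domain, &unit->playback_stream, rx_channel, SCODE_400);
     //      if (err < 0)
     //              return err;
     //
     //      // No initial tx cycles to skip and no sequence replay in this example.
     //      return amdtp_domain_start(&unit->domain, 0, false, false);
     // }
     //
     // static void example_stop_duplex(struct example_unit *unit)
     // {
     //      amdtp_domain_stop(&unit->domain);
     // }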