// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_MAX_SECOND		8

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */
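// A worked check of the constant above: 0x2e00 is 11776 ticks. At the
// 24.576 MHz cycle timer rate (TICKS_PER_SECOND), 11776 / 24576000 sec is
// about 479.17 usec, i.e. slightly less than four isochronous cycles
// (4 * TICKS_PER_CYCLE = 12288 ticks).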

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header, tstamp and 2 CIP header quadlets.
#define IR_CTX_HEADER_SIZE_CIP		16
// For iso header and tstamp.
#define IR_CTX_HEADER_SIZE_NO_CIP	8
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		8 // For 2 CIP header quadlets.
#define IT_PKT_HEADER_SIZE_NO_CIP	0 // Nothing.

static void pcm_period_work(struct work_struct *work);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the packet transmission method to use
 * @fmt: the value of fmt field in CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to allocate newly for protocol
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	INIT_WORK(&s->period_work, pcm_period_work);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	if (dir == AMDTP_OUT_STREAM)
		s->ctx_data.rx.syt_override = -1;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s:		the AMDTP stream, which must be initialized.
 * @runtime:	the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// The Linux driver for the 1394 OHCI controller voluntarily flushes
	// an isoc context when the total size of accumulated context headers
	// reaches PAGE_SIZE. This kicks work for the isoc context and brings
	// a callback in the middle of the scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest context header size between IT/IR contexts.
	// Here, the size of the context header in the IR context is used for
	// both contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;
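	// For example, with a 4 KiB PAGE_SIZE and the 16-byte IR context
	// header, PAGE_SIZE / ctx_header_size is 256 packets; at one packet
	// per 125 usec isoc cycle that is 32000 usec, which is what the
	// formula above yields.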

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of the syt interval. This comes from the interval of the isoc
	// cycle. As the 1394 OHCI controller can generate a hardware IRQ per
	// isoc packet, the interval is 125 usec.
	// However, there are two modes of transmission in IEC 61883-6:
	// blocking and non-blocking. In blocking mode, the sequence of isoc
	// packets includes 'empty' or 'NODATA' packets which include no
	// events. In non-blocking mode, the number of events per packet is
	// variable up to the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the syt interval, thus the minimum
	// period is 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL. So the number is 8, 16 or 32, depending
	 * on its sampling rate. For accurate period interrupts, it's
	 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

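	// A worked example for the delay computed below: the base is
	// TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE = 11776 - 3072 = 8704 ticks
	// (about 354 usec). In blocking mode, one syt interval worth of
	// events is added on top; e.g. at 48.0 kHz this is
	// 24576000 * 8 / 48000 = 4096 ticks (about 166.7 usec).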
	// default buffering in the device.
	if (s->direction == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.transfer_delay =
					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

		if (s->flags & CIP_BLOCKING) {
			// additional buffering needed to adjust for no-data
			// packets.
			s->ctx_data.rx.transfer_delay +=
				TICKS_PER_SECOND * s->syt_interval / rate;
		}
	}

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int cip_header_size = 0;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = sizeof(__be32) * 2;

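	// For example, at 48.0 kHz (syt_interval = 8), a hypothetical device
	// using 10 quadlets per data block with CIP headers would get
	// 8 + 8 * 10 * 4 = 328 bytes per packet (and five times the payload
	// part with CIP_JUMBO_PAYLOAD).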
	return cip_header_size +
		s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	cancel_work_sync(&s->period_work);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(unsigned int *data_block_state,
				bool is_blocking, bool is_no_info,
				unsigned int syt_interval, enum cip_sfc sfc)
{
	unsigned int data_blocks;

	/* Blocking mode. */
	if (is_blocking) {
		/* This module generates an empty packet for 'no data'. */
		if (is_no_info)
			data_blocks = 0;
		else
			data_blocks = syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			data_blocks = *data_block_state;
		} else {
			unsigned int phase = *data_block_state;

			/*
			 * This calculates the number of data blocks per
			 * packet so that
			 * 1) the overall rate is correct and exactly
			 *    synchronized to the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur
			 *    as early as possible in the sequence (to prevent
			 *    underruns of the device's buffer).
			 */
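			/*
			 * For the 44.1 kHz family, the average number of data
			 * blocks per packet is not an integer (e.g.
			 * 44100 / 8000 = 5.5125), hence the repeating
			 * sequences below. Over one full phase cycle the
			 * event count is exact: 80 cycles at 44.1 kHz, 40
			 * cycles at 88.2 kHz and 20 cycles at 176.4 kHz each
			 * carry exactly 441 events.
			 */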
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			*data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
			unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample
			 * is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between
			 * successive elements is about 1386.23.  Rounding the
			 * results of this formula to the SYT precision
			 * results in a sequence of differences that begins
			 * with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
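			/*
			 * Exact arithmetic behind the above: for the 44.1 kHz
			 * family the per-packet increment modulo
			 * TICKS_PER_CYCLE is 1386 + 34/147 ticks, so 34 of
			 * every 147 differences must be 1387. The condition
			 * below adds the extra tick exactly 34 times per lap
			 * of the 147-valued phase counter.
			 */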
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		queue_work(system_highpri_wq, &s->period_work);
	}
}

static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

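// Per the mask definitions above, the first CIP quadlet carries SID (bits
// 29-24), DBS (bits 23-16), SPH (bit 10) and DBC (bits 7-0); the second
// quadlet carries EOH (bit 31), FMT (bits 29-24), FDF (bits 23-16) and SYT
// (bits 15-0).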
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
			unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = 2 * sizeof(__be32);
		payload_length += params->header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
			   data_block_counter, s->packet_index, index);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < sizeof(__be32) * 2 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = (payload_length / sizeof(__be32) - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *payload_length,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index)
{
	const __be32 *cip_header;
	unsigned int cip_header_size;
	int err;

	*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = 8;
	else
		cip_header_size = 0;

	if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			*payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		cip_header = ctx_header + 2;
		err = check_cip_header(s, cip_header, *payload_length,
				       data_blocks, data_block_counter, syt);
		if (err < 0)
			return err;
	} else {
		cip_header = NULL;
		err = 0;
		*data_blocks = *payload_length / sizeof(__be32) /
			       s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
			   *data_block_counter, packet_index, index);

	return err;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
// second. On the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are
// used to represent it. Thus, via the Linux firewire subsystem, we can get
// the 3 bits for the second.
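// In the 16-bit timestamp below, bits 15-13 are the 3-bit second and bits
// 12-0 are the cycle, so the computed cycle count lies in the range of
// OHCI_MAX_SECOND * CYCLES_PER_SECOND (i.e. it wraps every 8 seconds).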
static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
		cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
	return cycle;
}

// Align to the actual cycle count for the packet which is going to be
// scheduled. This module queues the same number of isochronous cycles as the
// queue size to skip isochronous cycles, therefore it's OK to just increment
// the cycle by the queue size for the scheduled cycle.
static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
				   unsigned int queue_size)
{
	u32 cycle = compute_cycle_count(ctx_header_tstamp);
	return increment_cycle_count(cycle, queue_size);
}

static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets)
{
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	int i;
	int err;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int cycle;
		unsigned int payload_length;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_cycle_count(ctx_header[1]);

		err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
					  &data_blocks, &dbc, &syt, packet_index, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		ctx_header +=
			s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);

		packet_index = (packet_index + 1) % queue_size;
	}

	s->data_block_counter = dbc;

	return 0;
}

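// The SYT field packs the presentation time as the low 4 bits of the target
// cycle in bits 15-12 and the offset within that cycle
// (0 ... TICKS_PER_CYCLE - 1) in bits 11-0; compute_syt() below builds it
// that way.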
static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);
	return syt & CIP_SYT_MASK;
}

static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
			       const __be32 *ctx_header, unsigned int packets,
			       const struct seq_desc *seq_descs,
			       unsigned int seq_size)
{
	unsigned int dbc = s->data_block_counter;
	unsigned int seq_index = s->ctx_data.rx.seq_index;
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_index;
		unsigned int syt;

		desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);

		syt = seq->syt_offset;
		if (syt != CIP_SYT_NO_INFO) {
			syt = compute_syt(syt, desc->cycle,
					  s->ctx_data.rx.transfer_delay);
		}
		desc->syt = syt;
		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_index = (seq_index + 1) % seq_size;

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq_index = seq_index;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_interrupt())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int events_per_period = s->ctx_data.rx.events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int packets;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
			   d->seq_size);

	process_ctx_payloads(s, s->pkt_descs, packets);

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		unsigned int syt;
		struct {
			struct fw_iso_packet params;
			__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
		} template = { {0}, {0} };
		bool sched_irq = false;

		if (s->ctx_data.rx.syt_override < 0)
			syt = desc->syt;
		else
			syt = s->ctx_data.rx.syt_override;

		build_it_pkt_header(s, desc->cycle, &template.params,
				    desc->data_blocks, desc->data_block_counter,
				    syt, i);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		process_ctx_payloads(s, s->pkt_descs, packets);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
{
	struct amdtp_stream *irq_target = d->irq_target;
	unsigned int seq_tail = d->seq_tail;
	unsigned int seq_size = d->seq_size;
	unsigned int min_avail;
	struct amdtp_stream *s;

	min_avail = d->seq_size;
	list_for_each_entry(s, &d->streams, list) {
		unsigned int seq_index;
		unsigned int avail;

		if (s->direction == AMDTP_IN_STREAM)
			continue;

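		// Compute how many pooled sequence descriptors this stream
		// has not consumed yet, as the ring-buffer distance from its
		// read index to the domain's tail modulo seq_size.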
		seq_index = s->ctx_data.rx.seq_index;
		avail = d->seq_tail;
		if (seq_index > avail)
			avail += d->seq_size;
		avail -= seq_index;

		if (avail < min_avail)
			min_avail = avail;
	}

	while (min_avail < packets) {
		struct seq_desc *desc = d->seq_descs + seq_tail;

		desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
					&d->syt_offset_state, irq_target->sfc);
		desc->data_blocks = calculate_data_blocks(&d->data_block_state,
				!!(irq_target->flags & CIP_BLOCKING),
				desc->syt_offset == CIP_SYT_NO_INFO,
				irq_target->syt_interval, irq_target->sfc);

		++seq_tail;
		seq_tail %= seq_size;

		++min_avail;
	}

	d->seq_tail = seq_tail;
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *irq_target = private_data;
	struct amdtp_domain *d = irq_target->domain;
	unsigned int packets = header_length / sizeof(__be32);
	struct amdtp_stream *s;

	// Record enough entries with at least three extra cycles.
	pool_ideal_seq_descs(d, packets + 3);

	out_stream_callback(context, tstamp, header_length, header, irq_target);
	if (amdtp_streaming_error(irq_target))
		goto error;

	list_for_each_entry(s, &d->streams, list) {
		if (s != irq_target && amdtp_stream_running(s)) {
			fw_iso_context_flush_completions(s->context);
			if (amdtp_streaming_error(s))
				goto error;
		}
	}

	return;
error:
	if (amdtp_stream_running(irq_target))
		cancel_stream(irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

// This is executed one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, it is prepared to transmit the first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_cycle_count(ctx_header[1]);

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_it_cycle(*ctx_header, s->queue_size);

		if (s == s->domain->irq_target)
			context->callback.sc = irq_target_callback;
		else
			context->callback.sc = out_stream_callback;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @start_cycle: the isochronous cycle to start the context. Start immediately
 *		 if negative value is given.
 * @queue_size: The number of packets in the queue.
 * @idle_irq_interval: the interval at which to queue packets in the initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      int start_cycle, unsigned int queue_size,
			      unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	max_ctx_payload_size = amdtp_stream_get_max_payload(s);
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER)) {
			max_ctx_payload_size -= 8;
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		} else {
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
		}
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.

		if (!(s->flags & CIP_NO_HEADER))
			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
	}

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					  type, channel, speed, ctx_header_size,
					  amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This only affects the IR context. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, start_cycle, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in the software IRQ context of
		// period_work or in process context.
		//
		// When the software IRQ context was scheduled by the software
		// IRQ context of the IT contexts, queued packets were already
		// handled. Therefore, there is no need to flush the queue in
		// the buffer anymore.
		//
		// When the process context reaches here, some packets will
		// already be queued in the buffer. These packets should be
		// handled immediately to keep a better granularity of the
		// PCM pointer.
		//
		// Later, the process context will sometimes schedule the
		// software IRQ context of the period_work. Then, there is no
		// need to flush the queue, for the same reason as described
		// above.
		if (current_work() != &s->period_work) {
			// Queued packets should be processed without any
			// kernel preemption to keep latency against the bus
			// cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for recent isochronous cycle to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// Queued packets should be processed without any kernel
		// preemption to keep latency against the bus cycle.
		preempt_disable();
		fw_iso_context_flush_completions(irq_target->context);
		preempt_enable();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
1256}
1257EXPORT_SYMBOL(amdtp_stream_update);
1258
1259/**
1260 * amdtp_stream_stop - stop sending packets
1261 * @s: the AMDTP stream to stop
1262 *
1263 * All PCM and MIDI devices of the stream must be stopped before the stream
1264 * itself can be stopped.
1265 */
1266static void amdtp_stream_stop(struct amdtp_stream *s)
1267{
1268	mutex_lock(&s->mutex);
1269
1270	if (!amdtp_stream_running(s)) {
1271		mutex_unlock(&s->mutex);
1272		return;
1273	}
1274
1275	cancel_work_sync(&s->period_work);
1276	fw_iso_context_stop(s->context);
1277	fw_iso_context_destroy(s->context);
1278	s->context = ERR_PTR(-1);
1279	iso_packets_buffer_destroy(&s->buffer, s->unit);
1280	kfree(s->pkt_descs);
1281
1282	s->callbacked = false;
1283
1284	mutex_unlock(&s->mutex);
1285}
1286
1287/**
1288 * amdtp_stream_pcm_abort - abort the running PCM device
1289 * @s: the AMDTP stream about to be stopped
1290 *
1291 * If the isochronous stream needs to be stopped asynchronously, call this
1292 * function first to stop the PCM device.
1293 */
1294void amdtp_stream_pcm_abort(struct amdtp_stream *s)
1295{
1296	struct snd_pcm_substream *pcm;
1297
1298	pcm = READ_ONCE(s->pcm);
1299	if (pcm)
1300		snd_pcm_stop_xrun(pcm);
1301}
1302EXPORT_SYMBOL(amdtp_stream_pcm_abort);
1303
1304/**
1305 * amdtp_domain_init - initialize an AMDTP domain structure
1306 * @d: the AMDTP domain to initialize.
1307 */
1308int amdtp_domain_init(struct amdtp_domain *d)
1309{
1310	INIT_LIST_HEAD(&d->streams);
1311
1312	d->events_per_period = 0;
1313
1314	d->seq_descs = NULL;
1315
1316	return 0;
1317}
1318EXPORT_SYMBOL_GPL(amdtp_domain_init);
1319
1320/**
1321 * amdtp_domain_destroy - destroy an AMDTP domain structure
1322 * @d: the AMDTP domain to destroy.
1323 */
1324void amdtp_domain_destroy(struct amdtp_domain *d)
1325{
1326	// At present nothing to do.
1327	return;
1328}
1329EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
1330
1331/**
1332 * amdtp_domain_add_stream - register isoc context into the domain.
1333 * @d: the AMDTP domain.
1334 * @s: the AMDTP stream.
1335 * @channel: the isochronous channel on the bus.
1336 * @speed: firewire speed code.
1337 */
1338int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
1339			    int channel, int speed)
1340{
1341	struct amdtp_stream *tmp;
1342
1343	list_for_each_entry(tmp, &d->streams, list) {
1344		if (s == tmp)
1345			return -EBUSY;
1346	}
1347
1348	list_add(&s->list, &d->streams);
1349
1350	s->channel = channel;
1351	s->speed = speed;
1352	s->domain = d;
1353
1354	return 0;
1355}
1356EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
1357
1358static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
1359{
1360	int generation;
1361	int rcode;
1362	__be32 reg;
1363	u32 data;
1364
1365	// This is a request to local 1394 OHCI controller and expected to
1366	// complete without any event waiting.
1367	generation = fw_card->generation;
1368	smp_rmb();	// node_id vs. generation.
1369	rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
1370				   fw_card->node_id, generation, SCODE_100,
1371				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
1372				   &reg, sizeof(reg));
1373	if (rcode != RCODE_COMPLETE)
1374		return -EIO;
1375
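	// The CYCLE_TIME register packs a 7-bit second, a 13-bit cycle and a
	// 12-bit cycle offset; shifting out the lower 12 bits below leaves
	// just the second and cycle fields.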
	data = be32_to_cpu(reg);
	*cur_cycle = data >> 12;

	return 0;
}

/**
 * amdtp_domain_start - start sending packets for isoc context in the domain.
 * @d: the AMDTP domain.
 * @ir_delay_cycle: the cycle delay to start all IR contexts.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} *entry, initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int idle_irq_interval;
	unsigned int queue_size;
	struct amdtp_stream *s;
	int cycle;
	bool found = false;
	int err;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENXIO;
	d->irq_target = s;

	// This is the case in which the AMDTP streams in the domain run just
	// for a MIDI substream. Use the number of events equivalent to 10
	// msec as the interval of hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);
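	// For example, at 48.0 kHz with the defaults above (10 msec periods,
	// so events_per_period = 480 and events_per_buffer = 1440), this is
	// DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets in the queue.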

	d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
	if (!d->seq_descs)
		return -ENOMEM;
	d->seq_size = queue_size;
	d->seq_tail = 0;

	entry = &initial_state[s->sfc];
	d->data_block_state = entry->data_block;
	d->syt_offset_state = entry->syt_offset;
	d->last_syt_offset = TICKS_PER_CYCLE;

	if (ir_delay_cycle > 0) {
		struct fw_card *fw_card = fw_parent_device(s->unit)->card;

		err = get_current_cycle_time(fw_card, &cycle);
		if (err < 0)
			goto error;

		// No need to care about overflow in the cycle field because
		// it is wide enough.
		cycle += ir_delay_cycle;

		// Round up to sec field.
		if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
			unsigned int sec;

			// The sec field can overflow.
			sec = (cycle & 0xffffe000) >> 13;
			cycle = (++sec << 13) |
				((cycle & 0x00001fff) / CYCLES_PER_SECOND);
		}

		// In the OHCI 1394 specification, the lower 2 bits are
		// available for the sec field.
		cycle &= 0x00007fff;
	} else {
		cycle = -1;
	}

	list_for_each_entry(s, &d->streams, list) {
		int cycle_match;

		if (s->direction == AMDTP_IN_STREAM) {
			cycle_match = cycle;
		} else {
			// IT context starts immediately.
			cycle_match = -1;
			s->ctx_data.rx.seq_index = 0;
		}

		if (s != d->irq_target) {
			err = amdtp_stream_start(s, s->channel, s->speed,
						 cycle_match, queue_size, 0);
			if (err < 0)
				goto error;
		}
	}

	s = d->irq_target;
	s->ctx_data.rx.events_per_period = events_per_period;
	s->ctx_data.rx.event_count = 0;
	s->ctx_data.rx.seq_index = 0;

	idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
					 amdtp_rate_table[d->irq_target->sfc]);
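	// E.g. at 48.0 kHz with the default 480 events per period, this is
	// DIV_ROUND_UP(8000 * 480, 48000) = 80 cycles, i.e. one IRQ every
	// 10 msec while the context runs in its initial state.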
	err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size,
				 idle_irq_interval);
	if (err < 0)
		goto error;

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	kfree(d->seq_descs);
	d->seq_descs = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);

/**
 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;

	kfree(d->seq_descs);
	d->seq_descs = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
