1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2/*
3 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5 */
6
7#ifndef RXE_HDR_H
8#define RXE_HDR_H
9
/* Extracted information about a packet is carried in the sk_buff control
 * block (cb) array, so this struct must fit there (at most 48 bytes).
 * It is stored in the cb of received packets.
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* pkt header; bth is at hdr + offset */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length from bth through icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
	u8			offset;		/* bth offset from pkt->hdr */
};
27
/* These helpers should be used only for received skbs */
/* return the pkt info stored in the control block of a received skb */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	/* compile-time proof that rxe_pkt_info fits in skb->cb */
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}
34
/* inverse of SKB_TO_PKT(): recover the skb whose cb holds this pkt info */
static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}
39
/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only, since
 * rxe does not currently support the RD transport. Most of this could be
 * moved into the IB core; ib_pack.h has part of this but is incomplete.
 *
 * Header-specific routines insert/extract values to/from headers.
 * The routines named __hhh_(set_)fff() take a pointer to an
 * hhh header and get (set) the fff field. The routines named
 * hhh_(set_)fff() take a packet info struct and find the
 * header and field based on the opcode in the packet.
 * Conversion between network byte order and cpu order is also done.
 */
55
56#define RXE_ICRC_SIZE		(4)
57#define RXE_MAX_HDR_LENGTH	(80)
58
59/******************************************************************************
60 * Base Transport Header
61 ******************************************************************************/
/* field packing follows the BTH_*_MASK defines below:
 * flags = se | mig | pad | tver; qpn word = fecn | becn | resv6a | qpn;
 * apsn word = ack | resv7 | psn
 */
struct rxe_bth {
	u8			opcode;
	u8			flags;
	__be16			pkey;
	__be32			qpn;
	__be32			apsn;
};
69
70#define BTH_TVER		(0)
71#define BTH_DEF_PKEY		(0xffff)
72
73#define BTH_SE_MASK		(0x80)
74#define BTH_MIG_MASK		(0x40)
75#define BTH_PAD_MASK		(0x30)
76#define BTH_TVER_MASK		(0x0f)
77#define BTH_FECN_MASK		(0x80000000)
78#define BTH_BECN_MASK		(0x40000000)
79#define BTH_RESV6A_MASK		(0x3f000000)
80#define BTH_QPN_MASK		(0x00ffffff)
81#define BTH_ACK_MASK		(0x80000000)
82#define BTH_RESV7_MASK		(0x7f000000)
83#define BTH_PSN_MASK		(0x00ffffff)
84
85static inline u8 __bth_opcode(void *arg)
86{
87	struct rxe_bth *bth = arg;
88
89	return bth->opcode;
90}
91
92static inline void __bth_set_opcode(void *arg, u8 opcode)
93{
94	struct rxe_bth *bth = arg;
95
96	bth->opcode = opcode;
97}
98
99static inline u8 __bth_se(void *arg)
100{
101	struct rxe_bth *bth = arg;
102
103	return 0 != (BTH_SE_MASK & bth->flags);
104}
105
106static inline void __bth_set_se(void *arg, int se)
107{
108	struct rxe_bth *bth = arg;
109
110	if (se)
111		bth->flags |= BTH_SE_MASK;
112	else
113		bth->flags &= ~BTH_SE_MASK;
114}
115
116static inline u8 __bth_mig(void *arg)
117{
118	struct rxe_bth *bth = arg;
119
120	return 0 != (BTH_MIG_MASK & bth->flags);
121}
122
123static inline void __bth_set_mig(void *arg, u8 mig)
124{
125	struct rxe_bth *bth = arg;
126
127	if (mig)
128		bth->flags |= BTH_MIG_MASK;
129	else
130		bth->flags &= ~BTH_MIG_MASK;
131}
132
133static inline u8 __bth_pad(void *arg)
134{
135	struct rxe_bth *bth = arg;
136
137	return (BTH_PAD_MASK & bth->flags) >> 4;
138}
139
140static inline void __bth_set_pad(void *arg, u8 pad)
141{
142	struct rxe_bth *bth = arg;
143
144	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
145			(~BTH_PAD_MASK & bth->flags);
146}
147
148static inline u8 __bth_tver(void *arg)
149{
150	struct rxe_bth *bth = arg;
151
152	return BTH_TVER_MASK & bth->flags;
153}
154
155static inline void __bth_set_tver(void *arg, u8 tver)
156{
157	struct rxe_bth *bth = arg;
158
159	bth->flags = (BTH_TVER_MASK & tver) |
160			(~BTH_TVER_MASK & bth->flags);
161}
162
163static inline u16 __bth_pkey(void *arg)
164{
165	struct rxe_bth *bth = arg;
166
167	return be16_to_cpu(bth->pkey);
168}
169
170static inline void __bth_set_pkey(void *arg, u16 pkey)
171{
172	struct rxe_bth *bth = arg;
173
174	bth->pkey = cpu_to_be16(pkey);
175}
176
177static inline u32 __bth_qpn(void *arg)
178{
179	struct rxe_bth *bth = arg;
180
181	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
182}
183
184static inline void __bth_set_qpn(void *arg, u32 qpn)
185{
186	struct rxe_bth *bth = arg;
187	u32 resvqpn = be32_to_cpu(bth->qpn);
188
189	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
190			       (~BTH_QPN_MASK & resvqpn));
191}
192
193static inline int __bth_fecn(void *arg)
194{
195	struct rxe_bth *bth = arg;
196
197	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
198}
199
200static inline void __bth_set_fecn(void *arg, int fecn)
201{
202	struct rxe_bth *bth = arg;
203
204	if (fecn)
205		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
206	else
207		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
208}
209
210static inline int __bth_becn(void *arg)
211{
212	struct rxe_bth *bth = arg;
213
214	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
215}
216
217static inline void __bth_set_becn(void *arg, int becn)
218{
219	struct rxe_bth *bth = arg;
220
221	if (becn)
222		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
223	else
224		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
225}
226
227static inline u8 __bth_resv6a(void *arg)
228{
229	struct rxe_bth *bth = arg;
230
231	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
232}
233
234static inline void __bth_set_resv6a(void *arg)
235{
236	struct rxe_bth *bth = arg;
237
238	bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
239}
240
241static inline int __bth_ack(void *arg)
242{
243	struct rxe_bth *bth = arg;
244
245	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
246}
247
248static inline void __bth_set_ack(void *arg, int ack)
249{
250	struct rxe_bth *bth = arg;
251
252	if (ack)
253		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
254	else
255		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
256}
257
258static inline void __bth_set_resv7(void *arg)
259{
260	struct rxe_bth *bth = arg;
261
262	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
263}
264
265static inline u32 __bth_psn(void *arg)
266{
267	struct rxe_bth *bth = arg;
268
269	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
270}
271
272static inline void __bth_set_psn(void *arg, u32 psn)
273{
274	struct rxe_bth *bth = arg;
275	u32 apsn = be32_to_cpu(bth->apsn);
276
277	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
278			(~BTH_PSN_MASK & apsn));
279}
280
/* BTH accessors taking a pkt info struct; the bth always sits at
 * pkt->hdr + pkt->offset, so no per-opcode lookup is needed
 */
static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}
405
406static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
407			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
408			    u32 psn)
409{
410	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
411
412	bth->opcode = opcode;
413	bth->flags = (pad << 4) & BTH_PAD_MASK;
414	if (se)
415		bth->flags |= BTH_SE_MASK;
416	if (mig)
417		bth->flags |= BTH_MIG_MASK;
418	bth->pkey = cpu_to_be16(pkey);
419	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
420	psn &= BTH_PSN_MASK;
421	if (ack_req)
422		psn |= BTH_ACK_MASK;
423	bth->apsn = cpu_to_be32(psn);
424}
425
426/******************************************************************************
427 * Reliable Datagram Extended Transport Header
428 ******************************************************************************/
/* only the low 24 bits of een are used (see RDETH_EEN_MASK) */
struct rxe_rdeth {
	__be32			een;
};
432
433#define RDETH_EEN_MASK		(0x00ffffff)
434
435static inline u8 __rdeth_een(void *arg)
436{
437	struct rxe_rdeth *rdeth = arg;
438
439	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
440}
441
442static inline void __rdeth_set_een(void *arg, u32 een)
443{
444	struct rxe_rdeth *rdeth = arg;
445
446	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
447}
448
449static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
450{
451	return __rdeth_een(pkt->hdr + pkt->offset
452		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
453}
454
455static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
456{
457	__rdeth_set_een(pkt->hdr + pkt->offset
458		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
459}
460
461/******************************************************************************
462 * Datagram Extended Transport Header
463 ******************************************************************************/
/* queue key plus source qp number (low 24 bits of sqp used) */
struct rxe_deth {
	__be32			qkey;
	__be32			sqp;
};
468
469#define GSI_QKEY		(0x80010000)
470#define DETH_SQP_MASK		(0x00ffffff)
471
472static inline u32 __deth_qkey(void *arg)
473{
474	struct rxe_deth *deth = arg;
475
476	return be32_to_cpu(deth->qkey);
477}
478
479static inline void __deth_set_qkey(void *arg, u32 qkey)
480{
481	struct rxe_deth *deth = arg;
482
483	deth->qkey = cpu_to_be32(qkey);
484}
485
486static inline u32 __deth_sqp(void *arg)
487{
488	struct rxe_deth *deth = arg;
489
490	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
491}
492
493static inline void __deth_set_sqp(void *arg, u32 sqp)
494{
495	struct rxe_deth *deth = arg;
496
497	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
498}
499
/* DETH accessors; header location is opcode-dependent */
static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}
523
524/******************************************************************************
525 * RDMA Extended Transport Header
526 ******************************************************************************/
/* remote virtual address, rkey and transfer length */
struct rxe_reth {
	__be64			va;
	__be32			rkey;
	__be32			len;
};
532
533static inline u64 __reth_va(void *arg)
534{
535	struct rxe_reth *reth = arg;
536
537	return be64_to_cpu(reth->va);
538}
539
540static inline void __reth_set_va(void *arg, u64 va)
541{
542	struct rxe_reth *reth = arg;
543
544	reth->va = cpu_to_be64(va);
545}
546
547static inline u32 __reth_rkey(void *arg)
548{
549	struct rxe_reth *reth = arg;
550
551	return be32_to_cpu(reth->rkey);
552}
553
554static inline void __reth_set_rkey(void *arg, u32 rkey)
555{
556	struct rxe_reth *reth = arg;
557
558	reth->rkey = cpu_to_be32(rkey);
559}
560
561static inline u32 __reth_len(void *arg)
562{
563	struct rxe_reth *reth = arg;
564
565	return be32_to_cpu(reth->len);
566}
567
568static inline void __reth_set_len(void *arg, u32 len)
569{
570	struct rxe_reth *reth = arg;
571
572	reth->len = cpu_to_be32(len);
573}
574
/* RETH accessors; header location is opcode-dependent */
static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}
610
611/******************************************************************************
612 * Atomic Extended Transport Header
613 ******************************************************************************/
/* __packed: rkey is only 4 bytes, so without it the following u64
 * fields would be padded to an 8-byte boundary and break the wire
 * layout
 */
struct rxe_atmeth {
	__be64			va;
	__be32			rkey;
	__be64			swap_add;
	__be64			comp;
} __packed;
620
621static inline u64 __atmeth_va(void *arg)
622{
623	struct rxe_atmeth *atmeth = arg;
624
625	return be64_to_cpu(atmeth->va);
626}
627
628static inline void __atmeth_set_va(void *arg, u64 va)
629{
630	struct rxe_atmeth *atmeth = arg;
631
632	atmeth->va = cpu_to_be64(va);
633}
634
635static inline u32 __atmeth_rkey(void *arg)
636{
637	struct rxe_atmeth *atmeth = arg;
638
639	return be32_to_cpu(atmeth->rkey);
640}
641
642static inline void __atmeth_set_rkey(void *arg, u32 rkey)
643{
644	struct rxe_atmeth *atmeth = arg;
645
646	atmeth->rkey = cpu_to_be32(rkey);
647}
648
649static inline u64 __atmeth_swap_add(void *arg)
650{
651	struct rxe_atmeth *atmeth = arg;
652
653	return be64_to_cpu(atmeth->swap_add);
654}
655
656static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
657{
658	struct rxe_atmeth *atmeth = arg;
659
660	atmeth->swap_add = cpu_to_be64(swap_add);
661}
662
663static inline u64 __atmeth_comp(void *arg)
664{
665	struct rxe_atmeth *atmeth = arg;
666
667	return be64_to_cpu(atmeth->comp);
668}
669
670static inline void __atmeth_set_comp(void *arg, u64 comp)
671{
672	struct rxe_atmeth *atmeth = arg;
673
674	atmeth->comp = cpu_to_be64(comp);
675}
676
/* ATMETH accessors; header location is opcode-dependent */
static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}
724
725/******************************************************************************
726 * Ack Extended Transport Header
727 ******************************************************************************/
/* smsn packs the syndrome (high byte) and msn (low 24 bits) */
struct rxe_aeth {
	__be32			smsn;
};
731
732#define AETH_SYN_MASK		(0xff000000)
733#define AETH_MSN_MASK		(0x00ffffff)
734
/* AETH syndrome values: the high 3 bits (AETH_TYPE_MASK) select the
 * type (ACK/RNR NAK/NAK); the low 5 bits carry the credit count or
 * NAK code (e.g. AETH_ACK_UNLIMITED, AETH_NAK_*)
 */
enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};
748
749static inline u8 __aeth_syn(void *arg)
750{
751	struct rxe_aeth *aeth = arg;
752
753	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
754}
755
756static inline void __aeth_set_syn(void *arg, u8 syn)
757{
758	struct rxe_aeth *aeth = arg;
759	u32 smsn = be32_to_cpu(aeth->smsn);
760
761	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
762			 (~AETH_SYN_MASK & smsn));
763}
764
765static inline u32 __aeth_msn(void *arg)
766{
767	struct rxe_aeth *aeth = arg;
768
769	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
770}
771
772static inline void __aeth_set_msn(void *arg, u32 msn)
773{
774	struct rxe_aeth *aeth = arg;
775	u32 smsn = be32_to_cpu(aeth->smsn);
776
777	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
778			 (~AETH_MSN_MASK & smsn));
779}
780
/* AETH accessors; header location is opcode-dependent */
static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}
804
805/******************************************************************************
806 * Atomic Ack Extended Transport Header
807 ******************************************************************************/
/* original 64-bit value returned for an atomic operation */
struct rxe_atmack {
	__be64			orig;
};
811
812static inline u64 __atmack_orig(void *arg)
813{
814	struct rxe_atmack *atmack = arg;
815
816	return be64_to_cpu(atmack->orig);
817}
818
819static inline void __atmack_set_orig(void *arg, u64 orig)
820{
821	struct rxe_atmack *atmack = arg;
822
823	atmack->orig = cpu_to_be64(orig);
824}
825
/* ATMACK accessors; header location is opcode-dependent */
static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}
837
838/******************************************************************************
839 * Immediate Extended Transport Header
840 ******************************************************************************/
/* immediate data, kept in network byte order (never converted) */
struct rxe_immdt {
	__be32			imm;
};
844
845static inline __be32 __immdt_imm(void *arg)
846{
847	struct rxe_immdt *immdt = arg;
848
849	return immdt->imm;
850}
851
852static inline void __immdt_set_imm(void *arg, __be32 imm)
853{
854	struct rxe_immdt *immdt = arg;
855
856	immdt->imm = imm;
857}
858
/* IMMDT accessors; header location is opcode-dependent */
static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}
870
871/******************************************************************************
872 * Invalidate Extended Transport Header
873 ******************************************************************************/
/* rkey carried by the invalidate header */
struct rxe_ieth {
	__be32			rkey;
};
877
878static inline u32 __ieth_rkey(void *arg)
879{
880	struct rxe_ieth *ieth = arg;
881
882	return be32_to_cpu(ieth->rkey);
883}
884
885static inline void __ieth_set_rkey(void *arg, u32 rkey)
886{
887	struct rxe_ieth *ieth = arg;
888
889	ieth->rkey = cpu_to_be32(rkey);
890}
891
/* IETH accessors; header location is opcode-dependent */
static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}
903
/* sizes in bytes of each header type as laid out in the structs above */
enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};
915
/* total header length: the bth offset within the packet plus the
 * opcode-dependent header length
 */
static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return pkt->offset + rxe_opcode[pkt->opcode].length;
}

/* start of the payload, immediately after the last header */
static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

/* paylen covers bth through icrc, so subtract the headers, the pad
 * bytes and the icrc to get the payload proper
 */
static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
932
933#endif /* RXE_HDR_H */
934