/* SPDX-License-Identifier: GPL-2.0+ */
/*
 *  Copyright IBM Corp. 2001, 2019
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 */

#ifndef _ZCRYPT_API_H_
#define _ZCRYPT_API_H_

#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/zcrypt.h>
#include "ap_bus.h"

/**
 * Supported device types
 */
#define ZCRYPT_CEX2C		5
#define ZCRYPT_CEX2A		6
#define ZCRYPT_CEX3C		7
#define ZCRYPT_CEX3A		8
#define ZCRYPT_CEX4	       10
#define ZCRYPT_CEX5	       11
#define ZCRYPT_CEX6	       12
#define ZCRYPT_CEX7	       13

/**
 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
 * and stored in a page. Be careful when increasing this buffer due to size
 * limitations for AP requests.
 */
#define ZCRYPT_RNG_BUFFER_SIZE	4096

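/*
 * Illustrative sketch (assumption, error handling omitted): the hwrng
 * backend registered via zcrypt_rng_device_add() can keep one such
 * page-sized buffer and refill it through the msgtype rng op whenever
 * the buffer runs empty:
 *
 *	static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
 *	{
 *		if (zcrypt_rng_buffer_index == 0)
 *			zcrypt_rng_buffer_index =
 *				zcrypt_rng((char *)zcrypt_rng_buffer) / sizeof(*data);
 *		*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
 *		return sizeof(*data);
 *	}
 *
 * zcrypt_rng(), zcrypt_rng_buffer and zcrypt_rng_buffer_index are internal
 * to zcrypt_api.c and are named here only to show the buffering scheme.
 */
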
/*
 * Identifier for Crypto Request Performance Index
 */
enum crypto_ops {
	MEX_1K,
	MEX_2K,
	MEX_4K,
	CRT_1K,
	CRT_2K,
	CRT_4K,
	HWRNG,
	SECKEY,
	NUM_OPS
};

struct zcrypt_queue;

/* struct to hold tracking information for a userspace request/response */
struct zcrypt_track {
	int again_counter;		/* retry attempts counter */
	int last_qid;			/* last qid used */
	int last_rc;			/* last return code */
#ifdef CONFIG_ZCRYPT_DEBUG
	struct ap_fi fi;		/* failure injection cmd */
#endif
};

/* defines related to message tracking */
#define TRACK_AGAIN_MAX 10
#define TRACK_AGAIN_CARD_WEIGHT_PENALTY  1000
#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000

struct zcrypt_ops {
	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
			    struct ap_message *);
	long (*rsa_modexpo_crt)(struct zcrypt_queue *,
				struct ica_rsa_modexpo_crt *,
				struct ap_message *);
	long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *,
			  struct ap_message *);
	long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *,
			       struct ap_message *);
	long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
	struct list_head list;		/* zcrypt ops list. */
	struct module *owner;
	int variant;
	char name[128];
};
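
/*
 * Illustrative sketch (not taken from this header): a message type module
 * typically provides a statically initialized zcrypt_ops instance and
 * registers it during module init, e.g.
 *
 *	static struct zcrypt_ops zcrypt_msgtype_example_ops = {
 *		.owner		 = THIS_MODULE,
 *		.name		 = "EXAMPLE",
 *		.variant	 = 0,
 *		.rsa_modexpo	 = example_rsa_modexpo,
 *		.rsa_modexpo_crt = example_rsa_modexpo_crt,
 *	};
 *
 *	zcrypt_msgtype_register(&zcrypt_msgtype_example_ops);
 *
 * The "EXAMPLE" name and the example_* callbacks are hypothetical; real
 * message type drivers (e.g. zcrypt_msgtype6.c) define their own.
 */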

struct zcrypt_card {
	struct list_head list;		/* Device list. */
	struct list_head zqueues;	/* List of zcrypt queues */
	struct kref refcount;		/* device refcounting */
	struct ap_card *card;		/* The "real" ap card device. */
	int online;			/* User online/offline */

	int user_space_type;		/* User space device id. */
	char *type_string;		/* User space device name. */
	int min_mod_size;		/* Min number of bits. */
	int max_mod_size;		/* Max number of bits. */
	int max_exp_bit_length;		/* Max number of exponent bits. */
	const int *speed_rating;	/* Speed idx of crypto ops. */
	atomic_t load;			/* Utilization of the crypto device */

	int request_count;		/* # current requests. */
};

struct zcrypt_queue {
	struct list_head list;		/* Device list. */
	struct kref refcount;		/* device refcounting */
	struct zcrypt_card *zcard;	/* Card this queue belongs to. */
	struct zcrypt_ops *ops;		/* Crypto operations. */
	struct ap_queue *queue;		/* The "real" ap queue device. */
	int online;			/* User online/offline */

	atomic_t load;			/* Utilization of the crypto device */

	int request_count;		/* # current requests. */

	struct ap_message reply;	/* Per-device reply structure. */
};

/* transport layer rescanning */
extern atomic_t zcrypt_rescan_req;

extern spinlock_t zcrypt_list_lock;
extern int zcrypt_device_count;
extern struct list_head zcrypt_card_list;

#define for_each_zcrypt_card(_zc) \
	list_for_each_entry(_zc, &zcrypt_card_list, list)

#define for_each_zcrypt_queue(_zq, _zc) \
	list_for_each_entry(_zq, &(_zc)->zqueues, list)
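
/*
 * Illustrative sketch (assumption, not part of the original header):
 * walking the card and queue lists is expected to happen with
 * zcrypt_list_lock held, since registration and unregistration modify
 * the lists concurrently, e.g.
 *
 *	struct zcrypt_card *zc;
 *	struct zcrypt_queue *zq;
 *
 *	spin_lock(&zcrypt_list_lock);
 *	for_each_zcrypt_card(zc)
 *		for_each_zcrypt_queue(zq, zc)
 *			pr_debug("queue %02x.%04x online=%d\n",
 *				 AP_QID_CARD(zq->queue->qid),
 *				 AP_QID_QUEUE(zq->queue->qid),
 *				 zq->online);
 *	spin_unlock(&zcrypt_list_lock);
 */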

struct zcrypt_card *zcrypt_card_alloc(void);
void zcrypt_card_free(struct zcrypt_card *);
void zcrypt_card_get(struct zcrypt_card *);
int zcrypt_card_put(struct zcrypt_card *);
int zcrypt_card_register(struct zcrypt_card *);
void zcrypt_card_unregister(struct zcrypt_card *);

struct zcrypt_queue *zcrypt_queue_alloc(size_t);
void zcrypt_queue_free(struct zcrypt_queue *);
void zcrypt_queue_get(struct zcrypt_queue *);
int zcrypt_queue_put(struct zcrypt_queue *);
int zcrypt_queue_register(struct zcrypt_queue *);
void zcrypt_queue_unregister(struct zcrypt_queue *);
void zcrypt_queue_force_online(struct zcrypt_queue *, int);

int zcrypt_rng_device_add(void);
void zcrypt_rng_device_remove(void);

void zcrypt_msgtype_register(struct zcrypt_ops *);
void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstatus);

static inline unsigned long z_copy_from_user(bool userspace,
					     void *to,
					     const void __user *from,
					     unsigned long n)
{
	if (likely(userspace))
		return copy_from_user(to, from, n);
	memcpy(to, (void __force *) from, n);
	return 0;
}

static inline unsigned long z_copy_to_user(bool userspace,
					   void __user *to,
					   const void *from,
					   unsigned long n)
{
	if (likely(userspace))
		return copy_to_user(to, from, n);
	memcpy((void __force *) to, from, n);
	return 0;
}
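
/*
 * Illustrative sketch (field names shown only for illustration): a
 * send_cprb implementation can serve user space callers (ioctl path) and
 * in-kernel callers (zcrypt_send_cprb) with one code path by passing the
 * userspace flag through:
 *
 *	if (z_copy_from_user(userspace, msg->msg, xcrb->request_data_address,
 *			     xcrb->request_data_length))
 *		return -EFAULT;
 *
 * With userspace == false the source pointer is treated as a kernel
 * address and a plain memcpy() is done instead of copy_from_user().
 */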

#endif /* _ZCRYPT_API_H_ */