// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"

/* Worker thread to service arbiter mappings, one table per device SKU */
static const u32 thrd_to_arb_map_sku4[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};

static const u32 thrd_to_arb_map_sku6[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};

static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};

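/*
 * The fuse register flags units that are disabled in this SKU, so the
 * complement of the relevant fuse bits gives the masks of usable
 * accelerators and accel engines.
 */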
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	u32 fuses = self->fuses;

	return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
			 ADF_DH895XCC_ACCELERATORS_MASK;
}

static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 fuses = self->fuses;

	return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}

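/* Count the accelerators and accel engines enabled in the masks above */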
static u32 get_num_accels(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32 get_num_aes(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

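/* PCI BAR indices of the PMISC (CSRs), ETR (ring) and SRAM regions */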
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_SRAM_BAR;
}

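/* Decode the device SKU from the SKU field of the fuse control register */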
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;

	switch (sku) {
	case ADF_DH895XCC_FUSECTL_SKU_1:
		return DEV_SKU_1;
	case ADF_DH895XCC_FUSECTL_SKU_2:
		return DEV_SKU_2;
	case ADF_DH895XCC_FUSECTL_SKU_3:
		return DEV_SKU_3;
	case ADF_DH895XCC_FUSECTL_SKU_4:
		return DEV_SKU_4;
	default:
		return DEV_SKU_UNKNOWN;
	}
	return DEV_SKU_UNKNOWN;
}

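/* Select the thread-to-arbiter mapping table that matches the device SKU */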
static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
				    u32 const **arb_map_config)
{
	switch (accel_dev->accel_pci_dev.sku) {
	case DEV_SKU_1:
		*arb_map_config = thrd_to_arb_map_sku4;
		break;

	case DEV_SKU_2:
	case DEV_SKU_4:
		*arb_map_config = thrd_to_arb_map_sku6;
		break;
	default:
		dev_err(&GET_DEV(accel_dev),
			"The configuration doesn't match any SKU");
		*arb_map_config = NULL;
	}
}

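/* Per-VF CSR offsets used for PF-to-VF messaging and VF interrupt masking */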
static u32 get_pf2vf_offset(u32 i)
{
	return ADF_DH895XCC_PF2VF_OFFSET(i);
}

static u32 get_vintmsk_offset(u32 i)
{
	return ADF_DH895XCC_VINTMSK_OFFSET(i);
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	unsigned long accel_mask = hw_device->accel_mask;
	unsigned long ae_mask = hw_device->ae_mask;
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for_each_set_bit(i, &accel_mask, ADF_DH895XCC_MAX_ACCELERATORS) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
	}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
		   accel_dev->pf.vf_info ? 0 :
			GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
		   ADF_DH895XCC_SMIA1_MASK);
}

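/* No device-specific setup is needed to enable VF2PF communication */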
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return 0;
}

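/* Fill in the DH895xCC device constants and operation callbacks */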
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &dh895xcc_class;
	hw_data->instance_id = dh895xcc_class.instances++;
	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_vintmsk_offset = get_vintmsk_offset;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_DH895XCC_FW;
	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
	hw_data->reset_device = adf_reset_sbr;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}

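/* Drop the class instance reference taken in adf_init_hw_data_dh895xcc() */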
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}