Lines matching refs: spa

46 struct spa {
87 struct spa *spa;
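
These matches come from the OpenCAPI (ocxl) link code in the Linux kernel. Lines 46 and 87 are the definition of struct spa and the per-link pointer to it; the struct tracks one link's Shared Process Area, the table of process elements shared with the hardware. Every field below is taken from a reference elsewhere in this listing, but the types, ordering, and comments are a reconstruction, not the verbatim definition (struct pe_data is referenced at lines 211, 245 and 670 yet never expanded here):

struct spa {
        struct ocxl_process_element *spa_mem; /* the SPA table itself (lines 349-350) */
        int spa_order;                  /* page order of the spa_mem allocation (line 348) */
        struct mutex spa_lock;          /* serializes PE add/update/remove (line 344) */
        struct radix_tree_root pe_tree; /* maps PE handles to driver-side pe_data (line 345) */
        char *irq_name;                 /* "ocxl-xsl-..." from kasprintf (line 283) */
        int virq;                       /* Linux irq number (line 294) */
        void __iomem *reg_dsisr;        /* XSL fault registers (lines 104-106, 125) */
        void __iomem *reg_dar;
        void __iomem *reg_tfc;
        void __iomem *reg_pe_handle;
        struct xsl_fault {              /* state handed from hard irq to bottom half */
                struct work_struct fault_work; /* line 346 */
                u64 pe;
                u64 dsisr;
                u64 dar;
                struct pe_data pe_data; /* copied by value at line 245 */
        } xsl_fault;
};
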
100 static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
104 *dsisr = in_be64(spa->reg_dsisr);
105 *dar = in_be64(spa->reg_dar);
106 reg = in_be64(spa->reg_pe_handle);
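
Lines 100-106 are the register-read half of the fault path: DSISR encodes why the address translation failed, DAR holds the faulting address, and a third register carries the handle of the process element that faulted. A sketch of the complete helper; the SPA_PE_MASK constant and the masking step are assumptions, since only the three reads are visible in the listing:

static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
        u64 reg;

        *dsisr = in_be64(spa->reg_dsisr);       /* fault status */
        *dar = in_be64(spa->reg_dar);           /* faulting effective address */
        reg = in_be64(spa->reg_pe_handle);
        *pe = reg & SPA_PE_MASK;                /* assumed: handle sits in the low bits */
}
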
110 static void ack_irq(struct spa *spa, enum xsl_response r)
123 trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
124 spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
125 out_be64(spa->reg_tfc, reg);
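
Lines 110-125 acknowledge the fault back to the XSL (the translation service logic) by writing the Translation Fault Control register. A sketch assuming RESTART and ADDRESS_ERROR are the two xsl_response values seen elsewhere in the listing (line 178 vs. lines 206/225/236/253) and that each maps to its own TFC bit; the exact bit positions are assumptions:

static void ack_irq(struct spa *spa, enum xsl_response r)
{
        u64 reg = 0;

        if (r == RESTART)               /* fault resolved, hardware retries the translation */
                reg = PPC_BIT(31);      /* bit position assumed */
        else if (r == ADDRESS_ERROR)    /* unresolvable, hardware reports the error */
                reg = PPC_BIT(30);      /* bit position assumed */
        else
                WARN(1, "Invalid irq response %d\n", r);

        if (reg) {
                trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
                                spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
                out_be64(spa->reg_tfc, reg);
        }
}
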
136 struct spa *spa = container_of(fault, struct spa, xsl_fault);
178 ack_irq(spa, r);
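
Lines 136-178 are the work-queue bottom half. container_of at line 136 walks from the embedded xsl_fault member back to its struct spa, the fault is resolved in process context, and the outcome is acked with one of the two responses. A simplified sketch, assuming the powerpc copro_handle_mm_fault() helper resolves the fault against an mm pointer carried in pe_data (field name assumed); the real handler also drops the mm reference taken by the top half and post-processes the fault result:

static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
        vm_fault_t flt = 0;
        enum xsl_response r;
        struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
                                        fault_work);
        struct spa *spa = container_of(fault, struct spa, xsl_fault);

        /* process context: safe to fault pages in on behalf of the device */
        if (copro_handle_mm_fault(fault->pe_data.mm, fault->dar,
                                fault->dsisr, &flt))
                r = ADDRESS_ERROR;      /* could not resolve, give up */
        else
                r = RESTART;            /* resolved, let the XSL retry */

        ack_irq(spa, r);                /* line 178 */
}
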
184 struct spa *spa = link->spa;
191 read_irq(spa, &dsisr, &dar, &pe_handle);
192 trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);
195 pe = spa->spa_mem + pe_handle;
206 ack_irq(spa, ADDRESS_ERROR);
211 pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
225 ack_irq(spa, ADDRESS_ERROR);
236 ack_irq(spa, ADDRESS_ERROR);
242 spa->xsl_fault.pe = pe_handle;
243 spa->xsl_fault.dar = dar;
244 spa->xsl_fault.dsisr = dsisr;
245 spa->xsl_fault.pe_data = *pe_data;
251 schedule_work(&spa->xsl_fault.fault_work);
253 ack_irq(spa, ADDRESS_ERROR);
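
Lines 184-253 are the hard interrupt handler registered at line 304. It reads the fault registers, traces, looks up the driver-side state for the faulting PE, and either stashes the fault for the bottom half or acks an address error immediately; lines 206, 225 and 236 are three distinct early-error exits whose conditions are not visible here. A condensed sketch of the visible flow, assuming struct ocxl_link is the request_irq cookie and omitting those validity checks and the RCU/mm-refcount handling around the lookup:

static irqreturn_t xsl_fault_handler(int irq, void *data)
{
        struct ocxl_link *link = data;
        struct spa *spa = link->spa;
        u64 dsisr, dar, pe_handle;
        struct pe_data *pe_data;

        read_irq(spa, &dsisr, &dar, &pe_handle);
        trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);

        /* the real handler also sanity-checks the process element at
         * spa->spa_mem + pe_handle (line 195) before going further */

        pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
        if (!pe_data) {
                /* no software state for this PE: nothing we can resolve */
                ack_irq(spa, ADDRESS_ERROR);
                return IRQ_HANDLED;
        }

        /* stash what the bottom half needs, then leave interrupt context */
        spa->xsl_fault.pe = pe_handle;
        spa->xsl_fault.dar = dar;
        spa->xsl_fault.dsisr = dsisr;
        spa->xsl_fault.pe_data = *pe_data;
        schedule_work(&spa->xsl_fault.fault_work);

        return IRQ_HANDLED;
}
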
257 static void unmap_irq_registers(struct spa *spa)
259 pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
260 spa->reg_pe_handle);
263 static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
265 return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
266 &spa->reg_tfc, &spa->reg_pe_handle);
271 struct spa *spa = link->spa;
279 rc = map_irq_registers(dev, spa);
283 spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
285 if (!spa->irq_name) {
294 spa->virq = irq_create_mapping(NULL, hwirq);
295 if (!spa->virq) {
302 dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);
304 rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
316 irq_dispose_mapping(spa->virq);
318 kfree(spa->irq_name);
320 unmap_irq_registers(spa);
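
Lines 271-320 set up that interrupt for a link: map the XSL registers, build a device-unique irq name, map the hardware irq to a Linux virq, and install the handler, with lines 316-320 forming the unwind path. A condensed sketch of the sequence; the pnv_ocxl_get_xsl_irq() call, the goto labels, and the kasprintf arguments (guessed from the similarly formatted pr_debug at line 357) are assumptions:

static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
{
        struct spa *spa = link->spa;
        int hwirq, rc;

        rc = pnv_ocxl_get_xsl_irq(dev, &hwirq); /* helper name assumed */
        if (rc)
                return rc;

        rc = map_irq_registers(dev, spa);
        if (rc)
                return rc;

        spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
                                link->domain, link->bus, link->dev);
        if (!spa->irq_name) {
                rc = -ENOMEM;
                goto err_xsl;
        }

        spa->virq = irq_create_mapping(NULL, hwirq); /* NULL: default irq domain */
        if (!spa->virq) {
                rc = -EINVAL;
                goto err_name;
        }

        dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

        rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
                        link);
        if (rc)
                goto err_mapping;
        return 0;

err_mapping:
        irq_dispose_mapping(spa->virq);         /* line 316 */
err_name:
        kfree(spa->irq_name);                   /* line 318 */
err_xsl:
        unmap_irq_registers(spa);               /* line 320 */
        return rc;
}
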
326 struct spa *spa = link->spa;
328 if (spa->virq) {
329 free_irq(spa->virq, link);
330 irq_dispose_mapping(spa->virq);
332 kfree(spa->irq_name);
333 unmap_irq_registers(spa);
338 struct spa *spa;
340 spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
341 if (!spa)
344 mutex_init(&spa->spa_lock);
345 INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
346 INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);
348 spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
349 spa->spa_mem = (struct ocxl_process_element *)
350 __get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
351 if (!spa->spa_mem) {
353 kfree(spa);
357 link->dev, spa->spa_mem);
359 link->spa = spa;
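
Lines 338-359 allocate the SPA. The table is read directly by the hardware, so it must be zeroed, physically contiguous and naturally aligned, which is why it comes from __get_free_pages at order SPA_SPA_SIZE_LOG - PAGE_SHIFT (log2 of the table size minus log2 of the page size) rather than from kmalloc. A sketch of the whole function; the signature and return codes are assumptions around the listed lines:

static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
{
        struct spa *spa;

        spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
        if (!spa)
                return -ENOMEM;

        mutex_init(&spa->spa_lock);
        INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
        INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

        spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
        spa->spa_mem = (struct ocxl_process_element *)
                __get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
        if (!spa->spa_mem) {
                kfree(spa);
                return -ENOMEM;
        }
        pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
                link->dev, spa->spa_mem);

        link->spa = spa;
        return 0;
}
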
365 struct spa *spa = link->spa;
370 if (spa && spa->spa_mem) {
371 free_pages((unsigned long) spa->spa_mem, spa->spa_order);
372 kfree(spa);
373 link->spa = NULL;
401 rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
502 struct spa *spa = link->spa;
511 mutex_lock(&spa->spa_lock);
513 pe = spa->spa_mem + pe_handle;
551 radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);
569 trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
571 mutex_unlock(&spa->spa_lock);
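
Lines 502-571 are the PE-add path: under spa_lock, a slot in spa_mem is filled with the caller's context and a matching pe_data is inserted into the radix tree, which is the lookup end used by the fault handler at line 211 and the delete end at line 670. A skeleton of the locking and bookkeeping only; the signature, the handle derivation, the pe_data contents, and everything between lines 513 and 551 (field assignments, barriers, error handling) are assumptions or omissions:

int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
                u64 amr, struct mm_struct *mm)
{
        struct ocxl_link *link = link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        struct pe_data *pe_data;
        int pe_handle;

        pe_handle = pasid;                      /* derivation assumed */

        pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
        if (!pe_data)
                return -ENOMEM;
        pe_data->mm = mm;                       /* what the bottom half faults against */

        mutex_lock(&spa->spa_lock);
        pe = spa->spa_mem + pe_handle;          /* line 513 */
        pe->amr = cpu_to_be64(amr);             /* field name/endianness assumed; pidr, tidr etc. likewise */
        radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);
        trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
        mutex_unlock(&spa->spa_lock);
        return 0;
}
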
579 struct spa *spa = link->spa;
587 pe = spa->spa_mem + pe_handle;
589 mutex_lock(&spa->spa_lock);
608 mutex_unlock(&spa->spa_lock);
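
Lines 579-608 are the update path, which takes the same lock and touches the same slot; only the field update differs. A minimal sketch, assuming the update is the thread id of the entry (the actual field, endianness, and any barriers or hardware notification are not visible here):

static int ocxl_link_update_pe(void *link_handle, int pasid, u16 tid)
{
        struct ocxl_link *link = link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        int pe_handle;

        pe_handle = pasid;                      /* derivation assumed */
        pe = spa->spa_mem + pe_handle;          /* line 587 */

        mutex_lock(&spa->spa_lock);
        pe->tid = cpu_to_be32(tid);             /* field name/endianness assumed */
        mutex_unlock(&spa->spa_lock);
        return 0;
}
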
615 struct spa *spa = link->spa;
642 pe = spa->spa_mem + pe_handle;
644 mutex_lock(&spa->spa_lock);
651 trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
670 pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
681 mutex_unlock(&spa->spa_lock);
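
Lines 615-681 are the teardown mirror of the add path: under the same lock, the hardware-visible entry is invalidated and the pe_data is pulled out of the radix tree. A skeleton of the visible steps; the entry invalidation, the trailing trace arguments, and the deferred free (assumed here to be RCU, since the interrupt path can race with removal) are all assumptions:

int ocxl_link_remove_pe(void *link_handle, int pasid)
{
        struct ocxl_link *link = link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        struct pe_data *pe_data;
        int pe_handle;
        u32 pid, tid;

        pe_handle = pasid;                      /* derivation assumed */
        pe = spa->spa_mem + pe_handle;          /* line 642 */

        mutex_lock(&spa->spa_lock);
        pid = be32_to_cpu(pe->pid);             /* field names assumed */
        tid = be32_to_cpu(pe->tid);
        /* ... invalidate the hardware-visible entry here ... */
        trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
                        pid, tid);              /* trailing args assumed */

        pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
        if (pe_data)
                kfree_rcu(pe_data, rcu);        /* assumes pe_data embeds a struct rcu_head named rcu */
        mutex_unlock(&spa->spa_lock);
        return 0;
}
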