Lines matching refs:pe (cross-reference hits in the EEH PE support code)

48 struct eeh_pe *pe;
58 pe = kzalloc(alloc_size, GFP_KERNEL);
59 if (!pe) return NULL;
62 pe->type = type;
63 pe->phb = phb;
64 INIT_LIST_HEAD(&pe->child_list);
65 INIT_LIST_HEAD(&pe->edevs);
67 pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
69 return pe;
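
The eeh_pe_alloc() fragment above carves a single kzalloc() into the struct itself plus a per-PE scratch area, pointing pe->data at the first aligned offset past the struct. Below is a minimal userspace sketch of that single-allocation layout; node, node_alloc and the fixed 64-byte DATA_ALIGN are made-up stand-ins for the alignment the ALIGN() call rounds to, not the kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DATA_ALIGN      64      /* stand-in for the requested alignment */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct node {
        int type;
        void *data;             /* scratch area living in the same allocation */
};

/* One zeroed allocation covers the struct plus data_size bytes of scratch. */
static struct node *node_alloc(int type, size_t data_size)
{
        size_t hdr = ALIGN_UP(sizeof(struct node), DATA_ALIGN);
        struct node *n = calloc(1, hdr + data_size);

        if (!n)
                return NULL;
        n->type = type;
        n->data = (char *)n + hdr;      /* first byte after the rounded-up header */
        return n;
}

int main(void)
{
        struct node *n = node_alloc(1, 128);

        if (!n)
                return 1;
        memset(n->data, 0xab, 128);
        printf("struct at %p, scratch at %p\n", (void *)n, n->data);
        free(n);                        /* a single free releases both parts */
        return 0;
}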
81 struct eeh_pe *pe;
84 pe = eeh_pe_alloc(phb, EEH_PE_PHB);
85 if (!pe) {
91 list_add_tail(&pe->child, &eeh_phb_pe);
100 * @pe: EEH PE
106 int eeh_wait_state(struct eeh_pe *pe, int max_wait)
123 ret = eeh_ops->get_state(pe, &mwait);
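
eeh_wait_state() above keeps asking the platform for the PE state via eeh_ops->get_state() (whose &mwait argument lets the platform suggest how long to wait) until the state settles or max_wait runs out. Here is a generic sketch of that bounded-poll shape with a hypothetical check_ready() probe in place of the platform call; it illustrates the pattern, not the kernel loop.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical probe: pretends the resource becomes ready after N calls. */
static bool check_ready(int *calls_left)
{
        return --(*calls_left) <= 0;
}

/*
 * Poll check_ready() until it succeeds or max_wait_ms elapses, sleeping
 * poll_ms between probes.  Returns 0 on success, -1 on timeout.
 */
static int wait_ready(int max_wait_ms, int poll_ms, int *state)
{
        struct timespec ts = { 0, (long)poll_ms * 1000000L };

        while (max_wait_ms > 0) {
                if (check_ready(state))
                        return 0;
                nanosleep(&ts, NULL);
                max_wait_ms -= poll_ms;
        }
        return -1;
}

int main(void)
{
        int probes = 3;         /* becomes ready on the third probe */

        printf("%s\n", wait_ready(500, 100, &probes) == 0 ? "ready" : "timed out");
        return 0;
}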
159 struct eeh_pe *pe;
161 list_for_each_entry(pe, &eeh_phb_pe, child) {
167 if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
168 return pe;
176 * @pe: current PE
182 struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
184 struct list_head *next = pe->child_list.next;
186 if (next == &pe->child_list) {
188 if (pe == root)
190 next = pe->child.next;
191 if (next != &pe->parent->child_list)
193 pe = pe->parent;
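
eeh_pe_next() walks a PE subtree in pre-order without recursion: take the first child if there is one, otherwise climb toward the root until an unvisited sibling turns up. A self-contained sketch of the same successor logic follows, using plain child/sibling/parent pointers (a hypothetical struct node) instead of the kernel's intrusive list_head fields.

#include <stdio.h>
#include <stddef.h>

struct node {
        const char *name;
        struct node *parent;
        struct node *first_child;
        struct node *next_sibling;
};

/*
 * Pre-order successor of 'n' inside the subtree rooted at 'root':
 * descend to the first child if there is one, otherwise climb until
 * a node with an unvisited sibling is found.  Returns NULL once the
 * whole subtree has been visited.
 */
static struct node *node_next(struct node *n, struct node *root)
{
        if (n->first_child)
                return n->first_child;

        while (n != root) {
                if (n->next_sibling)
                        return n->next_sibling;
                n = n->parent;
        }
        return NULL;
}

#define for_each_node(root, n) \
        for ((n) = (root); (n); (n) = node_next((n), (root)))

int main(void)
{
        struct node root = { .name = "root" };
        struct node a = { .name = "a", .parent = &root };
        struct node b = { .name = "b", .parent = &root };
        struct node a1 = { .name = "a1", .parent = &a };
        struct node *n;

        root.first_child = &a;
        a.next_sibling = &b;
        a.first_child = &a1;

        for_each_node(&root, n)
                printf("%s\n", n->name);        /* root a a1 b */
        return 0;
}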
214 struct eeh_pe *pe;
217 eeh_for_each_pe(root, pe) {
218 ret = fn(pe, flag);
237 struct eeh_pe *pe;
247 eeh_for_each_pe(root, pe)
248 eeh_pe_for_each_dev(pe, edev, tmp)
260 static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
265 if (pe->type & EEH_PE_PHB)
268 if (*target_pe == pe->addr)
269 return pe;
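
__eeh_pe_get() is the callback handed to the traversal helper above (the fn(pe, flag) loop): it skips PHB PEs and returns the PE whose addr matches the target, and returning non-NULL is what ends the walk. The sketch below shows that traverse-with-early-stop contract over a flat list; pe_traverse() and match_addr() are made-up names and there is no tree, only the callback pattern.

#include <stdio.h>
#include <stddef.h>

struct pe {
        int addr;
        struct pe *next;
};

/* Visit every node; stop and return whatever the callback returns non-NULL. */
static void *pe_traverse(struct pe *head,
                         void *(*fn)(struct pe *, void *), void *arg)
{
        void *ret;

        for (; head; head = head->next) {
                ret = fn(head, arg);
                if (ret)
                        return ret;
        }
        return NULL;
}

/* Callback: match on address, return the node to stop the walk. */
static void *match_addr(struct pe *pe, void *arg)
{
        int *target = arg;

        return (pe->addr == *target) ? pe : NULL;
}

int main(void)
{
        struct pe c = { .addr = 3 };
        struct pe b = { .addr = 2, .next = &c };
        struct pe a = { .addr = 1, .next = &b };
        int want = 2;
        struct pe *found = pe_traverse(&a, match_addr, &want);

        printf("found addr %d\n", found ? found->addr : -1);
        return 0;
}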
309 struct eeh_pe *pe, *parent;
317 pe = eeh_pe_get(hose, edev->pe_config_addr);
318 if (pe) {
319 if (pe->type & EEH_PE_INVALID) {
320 list_add_tail(&edev->entry, &pe->edevs);
321 edev->pe = pe;
326 parent = pe;
335 pe->parent->addr);
338 pe->type = EEH_PE_BUS;
339 edev->pe = pe;
342 list_add_tail(&edev->entry, &pe->edevs);
350 pe = eeh_pe_alloc(hose, EEH_PE_VF);
352 pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
353 if (!pe) {
358 pe->addr = edev->pe_config_addr;
371 edev->pe = NULL;
372 kfree(pe);
378 pe->parent = new_pe_parent;
379 list_add_tail(&pe->child, &new_pe_parent->child_list);
385 list_add_tail(&edev->entry, &pe->edevs);
386 edev->pe = pe;
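
The insertion path above first looks for an existing PE with the device's config address (eeh_pe_get()) and only allocates a fresh PE, linked under a parent, when that lookup fails. A compact sketch of the find-or-create step on a toy parent/child structure follows; pe_get() and pe_insert() are hypothetical, and the real code additionally juggles PE types, VF PEs and invalid PEs.

#include <stdio.h>
#include <stdlib.h>

/* Toy PE: singly linked list of children hanging off one parent. */
struct pe {
        int addr;
        struct pe *parent;
        struct pe *children;    /* first child */
        struct pe *sibling;     /* next child of the same parent */
};

/* Linear search among 'parent's existing children. */
static struct pe *pe_get(struct pe *parent, int addr)
{
        struct pe *pe;

        for (pe = parent->children; pe; pe = pe->sibling)
                if (pe->addr == addr)
                        return pe;
        return NULL;
}

/* Reuse a matching child if one exists, otherwise allocate and link a new one. */
static struct pe *pe_insert(struct pe *parent, int addr)
{
        struct pe *pe = pe_get(parent, addr);

        if (pe)
                return pe;              /* device joins an existing PE */

        pe = calloc(1, sizeof(*pe));
        if (!pe)
                return NULL;
        pe->addr = addr;
        pe->parent = parent;
        pe->sibling = parent->children;
        parent->children = pe;
        return pe;
}

int main(void)
{
        struct pe root = { .addr = 0 };
        struct pe *a = pe_insert(&root, 5);
        struct pe *b = pe_insert(&root, 5);     /* same address: reused */

        if (!a || !b)
                return 1;
        printf("%s\n", (a == b) ? "reused existing PE" : "bug");
        while (root.children) {
                struct pe *next = root.children->sibling;
                free(root.children);
                root.children = next;
        }
        return 0;
}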
404 struct eeh_pe *pe, *parent, *child;
408 pe = eeh_dev_to_pe(edev);
409 if (!pe) {
415 edev->pe = NULL;
425 parent = pe->parent;
428 if (pe->type & EEH_PE_PHB)
436 keep = !!(pe->state & EEH_PE_KEEP);
437 recover = !!(pe->state & EEH_PE_RECOVERING);
441 if (list_empty(&pe->edevs) &&
442 list_empty(&pe->child_list)) {
443 list_del(&pe->child);
444 kfree(pe);
458 if (list_empty(&pe->edevs)) {
460 list_for_each_entry(child, &pe->child_list, child) {
468 pe->type |= EEH_PE_INVALID;
474 pe = parent;
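
The removal path above detaches a device and then prunes upward: a PE left with no devices and no children is unlinked and freed, and the walk continues with its parent toward the PHB, while PEs still referenced or still needed by recovery (the keep/recover flags) are preserved and marked EEH_PE_INVALID instead of freed. Below is a simplified sketch of the freeing-upward part only, with reference counts standing in for the kernel's list-emptiness checks and a bare 'keep' flag standing in for the EEH_PE_KEEP/EEH_PE_INVALID bookkeeping.

#include <stdio.h>
#include <stdlib.h>

/* Toy PE: tracks how many devices and child PEs still reference it. */
struct pe {
        const char *name;
        struct pe *parent;
        int devs;               /* devices still attached to this PE */
        int children;           /* child PEs still linked below it */
        int keep;               /* set while recovery still needs the node */
};

/*
 * After a device leaves 'pe', prune upward: free every PE that has no
 * devices and no children left, then continue with its parent.  Stop at
 * the first ancestor that is still in use or marked 'keep', and never
 * free the root (the PHB PE in the kernel's tree).
 */
static void pe_prune(struct pe *pe)
{
        while (pe && pe->parent) {
                struct pe *parent = pe->parent;

                if (pe->devs || pe->children || pe->keep)
                        break;
                parent->children--;
                printf("freeing %s\n", pe->name);
                free(pe);
                pe = parent;
        }
}

int main(void)
{
        struct pe *root = calloc(1, sizeof(*root));
        struct pe *bus = calloc(1, sizeof(*bus));
        struct pe *dev = calloc(1, sizeof(*dev));

        if (!root || !bus || !dev)
                return 1;
        root->name = "phb";
        bus->name = "bus";  bus->parent = root; root->children = 1;
        dev->name = "dev";  dev->parent = bus;  bus->children = 1;
        dev->devs = 1;

        dev->devs = 0;          /* last device on the leaf PE goes away */
        pe_prune(dev);          /* frees "dev", then the now-empty "bus" */
        printf("root children left: %d\n", root->children);
        free(root);
        return 0;
}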
482 * @pe: EEH PE
489 void eeh_pe_update_time_stamp(struct eeh_pe *pe)
493 if (!pe) return;
495 if (pe->freeze_count <= 0) {
496 pe->freeze_count = 0;
497 pe->tstamp = ktime_get_seconds();
500 if (tstamp - pe->tstamp > 3600) {
501 pe->tstamp = tstamp;
502 pe->freeze_count = 0;
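
eeh_pe_update_time_stamp() keeps freeze_count meaningful by restarting a one-hour window: if the count is empty or the window is more than 3600 seconds old, both the timestamp and the count are reset, so only recent freezes accumulate. The same sliding-window reset is sketched below with a hypothetical freeze_window struct and an injected 'now' so it can be exercised without waiting an hour.

#include <stdio.h>
#include <time.h>

/* Error counter with a one-hour observation window, like pe->freeze_count. */
struct freeze_window {
        time_t tstamp;          /* when the current window started */
        int freeze_count;
};

/*
 * Called before counting a new freeze: (re)start the window if it is
 * empty or more than an hour old.
 */
static void window_update(struct freeze_window *w, time_t now)
{
        if (w->freeze_count <= 0) {
                w->freeze_count = 0;
                w->tstamp = now;
        } else if (now - w->tstamp > 3600) {
                w->tstamp = now;
                w->freeze_count = 0;
        }
}

int main(void)
{
        struct freeze_window w = { 0 };
        time_t now = time(NULL);

        window_update(&w, now);
        w.freeze_count++;                       /* first freeze */
        window_update(&w, now + 7200);          /* two hours later */
        printf("count after stale window: %d\n", w.freeze_count);  /* 0 */
        return 0;
}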
509 * @pe: EEH PE
517 struct eeh_pe *pe;
519 eeh_for_each_pe(root, pe)
520 if (!(pe->state & EEH_PE_REMOVED))
521 pe->state |= state;
527 * @pe: EEH PE
535 struct eeh_pe *pe;
540 eeh_for_each_pe(root, pe) {
541 list_for_each_entry(edev, &pe->edevs, entry) {
547 if (pe->state & EEH_PE_CFG_RESTRICTED)
548 pe->state |= EEH_PE_CFG_BLOCKED;
562 * @pe: EEH PE
566 void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
568 eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
583 struct eeh_pe *pe;
587 eeh_for_each_pe(root, pe) {
589 if (pe->state & EEH_PE_REMOVED)
592 if (!include_passed && eeh_pe_passed(pe))
595 pe->state &= ~state;
605 pe->check_count = 0;
606 eeh_pe_for_each_dev(pe, edev, tmp) {
615 if (pe->state & EEH_PE_CFG_RESTRICTED)
616 pe->state &= ~EEH_PE_CFG_BLOCKED;
787 * @pe: EEH PE
792 void eeh_pe_restore_bars(struct eeh_pe *pe)
798 eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
803 * @pe: EEH PE
810 const char *eeh_pe_loc_get(struct eeh_pe *pe)
812 struct pci_bus *bus = eeh_pe_bus_get(pe);
840 * @pe: EEH PE
848 struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
853 if (pe->type & EEH_PE_PHB)
854 return pe->phb->bus;
857 if (pe->state & EEH_PE_PRI_BUS)
858 return pe->bus;
861 edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
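
eeh_pe_bus_get() resolves a struct pci_bus in priority order: a PHB PE hands back its PHB's root bus, a PE with the primary-bus state returns the cached pe->bus, and otherwise the bus comes from the first device still attached via list_first_entry_or_null(), so the result may legitimately be NULL. A sketch of that ordered-fallback lookup with a hypothetical bus_for() helper and plain pointers in place of the device list:

#include <stdio.h>
#include <stddef.h>

struct bus { const char *name; };

struct pe {
        int is_phb;
        struct bus *phb_bus;            /* always valid for a PHB PE */
        int has_cached;                 /* "primary bus" state bit */
        struct bus *cached_bus;         /* only trusted while has_cached is set */
        struct bus *first_dev_bus;      /* NULL when no device is attached */
};

/* Resolve a bus in priority order; may return NULL for an empty PE. */
static struct bus *bus_for(struct pe *pe)
{
        if (pe->is_phb)
                return pe->phb_bus;
        if (pe->has_cached)
                return pe->cached_bus;
        return pe->first_dev_bus;       /* "first entry or NULL" fallback */
}

int main(void)
{
        struct bus root = { "root-bus" };
        struct pe phb = { .is_phb = 1, .phb_bus = &root };
        struct pe empty = { 0 };        /* nothing attached */

        printf("phb pe   -> %s\n", bus_for(&phb)->name);
        printf("empty pe -> %s\n", bus_for(&empty) ? bus_for(&empty)->name : "(none)");
        return 0;
}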