Lines Matching refs:prdev
24 __page_reporting_request(struct page_reporting_dev_info *prdev)
29 state = atomic_read(&prdev->state);
37 state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
46 schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
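The fragments above (file lines 24-46) are from __page_reporting_request(), which drives a small atomic state machine: read prdev->state, bail out if a pass has already been requested, flip the state to PAGE_REPORTING_REQUESTED with atomic_xchg(), and schedule the delayed worker only when the previous state was idle. The listing appears to be mm/page_reporting.c from the Linux kernel; the sketch below reconstructs the function around the listed lines rather than quoting the source. The enum values, PAGE_REPORTING_DELAY, and the commented struct shape are assumptions inferred from the fragments; PAGE_REPORTING_CAPACITY and struct page_reporting_dev_info come from <linux/page_reporting.h>.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/page_reporting.h>
#include <linux/rcupdate.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "internal.h"   /* assumed: __isolate_free_page(), __putback_isolated_page() */

/*
 * Shape of the device info implied by the fragments (assumed):
 *      struct page_reporting_dev_info {
 *              int (*report)(struct page_reporting_dev_info *prdev,
 *                            struct scatterlist *sgl, unsigned int nents);
 *              struct delayed_work work;
 *              atomic_t state;
 *      };
 */

/* assumed state machine and batching delay */
enum {
        PAGE_REPORTING_IDLE = 0,
        PAGE_REPORTING_REQUESTED,
        PAGE_REPORTING_ACTIVE
};

#define PAGE_REPORTING_DELAY    (2 * HZ)

static void
__page_reporting_request(struct page_reporting_dev_info *prdev)
{
        unsigned int state;

        /* Nothing to do if a pass has already been requested. */
        state = atomic_read(&prdev->state);
        if (state == PAGE_REPORTING_REQUESTED)
                return;

        /*
         * Record the request. Only the caller that observes the old state
         * as IDLE schedules the worker; if a pass is already ACTIVE it
         * will notice the REQUESTED state and reschedule itself.
         */
        state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
        if (state != PAGE_REPORTING_IDLE)
                return;

        /* Delay the pass so a sizable batch of freed pages can build up. */
        schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}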
49 /* notify prdev of free page reporting request */
52 struct page_reporting_dev_info *prdev;
60 prdev = rcu_dereference(pr_dev_info);
61 if (likely(prdev))
62 __page_reporting_request(prdev);
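Lines 49-62 are the notification path. The global pr_dev_info pointer is RCU-protected (note the rcu_assign_pointer()/rcu_access_pointer() pairing further down the listing), so the notifier dereferences it under the RCU read lock and forwards the request only if a reporting device is actually registered. A sketch, assuming the wrapper is named __page_reporting_notify() and that pr_dev_info is a static __rcu pointer:

/* RCU-protected pointer to the registered reporting device, if any (assumed). */
static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;

/* notify prdev of free page reporting request */
void __page_reporting_notify(void)
{
        struct page_reporting_dev_info *prdev;

        /*
         * pr_dev_info is NULL only before a driver registers or while one
         * is being torn down, so the common case is a single dereference.
         */
        rcu_read_lock();
        prdev = rcu_dereference(pr_dev_info);
        if (likely(prdev))
                __page_reporting_request(prdev);

        rcu_read_unlock();
}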
68 page_reporting_drain(struct page_reporting_dev_info *prdev,
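Line 68 is the signature of page_reporting_drain(), which the reporting path below uses to hand batched pages back to the buddy allocator once the device has (or has not) processed them. A plausible sketch of such a drain routine, assuming the isolated pages are returned with __putback_isolated_page() and that only pages from a successful report (reported == true) get the Reported flag; the helper names inside the loop are assumptions:

static void
page_reporting_drain(struct page_reporting_dev_info *prdev,
                     struct scatterlist *sgl, unsigned int nents, bool reported)
{
        struct scatterlist *sg = sgl;

        /* Put every batched page back on its free list. */
        do {
                struct page *page = sg_page(sg);
                int mt = get_pageblock_migratetype(page);
                unsigned int order = get_order(sg->length);

                __putback_isolated_page(page, order, mt);

                /* Skip flagging if the device rejected the batch. */
                if (!reported)
                        continue;

                /* Flag still-intact buddy pages so later passes skip them. */
                if (PageBuddy(page) && buddy_order(page) == order)
                        __SetPageReported(page);
        } while ((sg = sg_next(sg)));

        /* Reinitialize the scatterlist now that it is empty. */
        sg_init_table(sgl, nents);
}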
109 page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
157 atomic_set(&prdev->state, PAGE_REPORTING_REQUESTED);
188 err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY);
200 page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err);
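Lines 109-200 are the core of a pass: page_reporting_cycle() walks one free list (a given zone, order, and migratetype), pulls not-yet-reported pages into the scatterlist, and each time the list reaches PAGE_REPORTING_CAPACITY entries it drops the zone lock, calls prdev->report(), and drains the batch back, flagging pages only when the report succeeded (!err). The atomic_set() at line 157 restores the REQUESTED state when the per-pass budget runs out so the worker returns later; the sketch below omits that budget accounting and the free-list rotation the kernel does before dropping the lock.

static int
page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
                     unsigned int order, unsigned int mt,
                     struct scatterlist *sgl, unsigned int *offset)
{
        struct list_head *list = &zone->free_area[order].free_list[mt];
        unsigned int page_len = PAGE_SIZE << order;
        struct page *page, *next;
        int err = 0;

        /* Nothing to do for an empty free list. */
        if (list_empty(list))
                return err;

        spin_lock_irq(&zone->lock);

        list_for_each_entry_safe(page, next, list, lru) {
                /* Pages flagged in an earlier pass are skipped. */
                if (PageReported(page))
                        continue;

                /* While there is room, isolate pages into the scatterlist. */
                if (*offset) {
                        if (!__isolate_free_page(page, order)) {
                                next = page;
                                break;
                        }
                        --(*offset);
                        sg_set_page(&sgl[*offset], page, page_len, 0);
                        continue;
                }

                /* Scatterlist is full: report it with the zone lock dropped. */
                spin_unlock_irq(&zone->lock);

                err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY);

                /* The whole batch was handed off, start refilling from scratch. */
                *offset = PAGE_REPORTING_CAPACITY;

                spin_lock_irq(&zone->lock);

                /* Return the batch to the free lists, flagging on success. */
                page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err);

                /* The saved next pointer is stale after dropping the lock. */
                next = list_first_entry(list, struct page, lru);

                if (err)
                        break;
        }

        spin_unlock_irq(&zone->lock);

        return err;
}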
223 page_reporting_process_zone(struct page_reporting_dev_info *prdev,
248 err = page_reporting_cycle(prdev, zone, order, mt,
259 err = prdev->report(prdev, sgl, leftover);
263 page_reporting_drain(prdev, sgl, leftover, !err);
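Lines 223-263 wrap the cycle per zone: page_reporting_process_zone() runs page_reporting_cycle() over every reportable order/migratetype free list and then flushes whatever partial batch ("leftover") is still sitting in the scatterlist with one final prdev->report() and drain. The kernel version also checks a zone watermark before touching anything, which this sketch leaves out; PAGE_REPORTING_MIN_ORDER and the upper loop bound differ across kernel versions and are assumptions here.

#define PAGE_REPORTING_MIN_ORDER        pageblock_order /* assumed lower bound */

static int
page_reporting_process_zone(struct page_reporting_dev_info *prdev,
                            struct scatterlist *sgl, struct zone *zone)
{
        unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY;
        int err = 0;

        /* Walk the higher-order free lists of this zone. */
        for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) {
                for (mt = 0; mt < MIGRATE_TYPES; mt++) {
                        /* Isolated pageblocks are left alone. */
                        if (is_migrate_isolate(mt))
                                continue;

                        err = page_reporting_cycle(prdev, zone, order, mt,
                                                   sgl, &offset);
                        if (err)
                                return err;
                }
        }

        /* Report the partially filled batch before going idle. */
        leftover = PAGE_REPORTING_CAPACITY - offset;
        if (leftover) {
                sgl = &sgl[offset];
                err = prdev->report(prdev, sgl, leftover);

                /* Flush the remaining pages back under the zone lock. */
                spin_lock_irq(&zone->lock);
                page_reporting_drain(prdev, sgl, leftover, !err);
                spin_unlock_irq(&zone->lock);
        }

        return err;
}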
273 struct page_reporting_dev_info *prdev =
285 atomic_set(&prdev->state, state);
295 err = page_reporting_process_zone(prdev, sgl, zone);
307 state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
309 schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
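Lines 273-309 are the delayed-work handler. container_of() recovers prdev from the work item, the state is set to PAGE_REPORTING_ACTIVE for the duration of the pass, each zone is processed, and a closing atomic_cmpxchg() back to PAGE_REPORTING_IDLE doubles as a check for requests that raced in mid-pass: if the state is no longer ACTIVE, the work is rescheduled after PAGE_REPORTING_DELAY. The scatterlist allocation below is an assumption about where the batch buffer comes from.

static void page_reporting_process(struct work_struct *work)
{
        struct delayed_work *d_work = to_delayed_work(work);
        struct page_reporting_dev_info *prdev =
                container_of(d_work, struct page_reporting_dev_info, work);
        int err = 0, state = PAGE_REPORTING_ACTIVE;
        struct scatterlist *sgl;
        struct zone *zone;

        /*
         * Mark the pass as active; any notification from here on flips
         * the state back to REQUESTED and is handled after the pass.
         */
        atomic_set(&prdev->state, state);

        /* Batch buffer handed to prdev->report() (allocation assumed). */
        sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL);
        if (!sgl)
                goto err_out;

        sg_init_table(sgl, PAGE_REPORTING_CAPACITY);

        for_each_zone(zone) {
                err = page_reporting_process_zone(prdev, sgl, zone);
                if (err)
                        break;
        }

        kfree(sgl);
err_out:
        /*
         * If nobody requested another pass we go idle; otherwise keep the
         * worker alive and try again after the usual delay.
         */
        state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
        if (state == PAGE_REPORTING_REQUESTED)
                schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}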
315 int page_reporting_register(struct page_reporting_dev_info *prdev)
328 atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
329 INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);
332 __page_reporting_request(prdev);
335 rcu_assign_pointer(pr_dev_info, prdev);
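Registration (lines 315-335) sets the device to idle, wires its delayed work to page_reporting_process(), queues an initial pass over the free lists, and only then publishes the pointer with rcu_assign_pointer() so the notification path can start using it. The mutex and the -EBUSY check for an already-registered device are assumptions about how double registration is likely prevented.

static DEFINE_MUTEX(page_reporting_mutex); /* assumed: serializes (un)registration */

int page_reporting_register(struct page_reporting_dev_info *prdev)
{
        int err = 0;

        mutex_lock(&page_reporting_mutex);

        /* Only one reporting device at a time (assumed policy). */
        if (rcu_access_pointer(pr_dev_info)) {
                err = -EBUSY;
                goto err_out;
        }

        /* Start out idle with the worker ready to run. */
        atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
        INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);

        /* Queue an initial flush of the zones' free lists. */
        __page_reporting_request(prdev);

        /* Publish the device so the notification path can see it. */
        rcu_assign_pointer(pr_dev_info, prdev);

err_out:
        mutex_unlock(&page_reporting_mutex);

        return err;
}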
349 void page_reporting_unregister(struct page_reporting_dev_info *prdev)
353 if (rcu_access_pointer(pr_dev_info) == prdev) {
359 cancel_delayed_work_sync(&prdev->work);
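Unregistration (lines 349-359) is the mirror image: if prdev is the currently published device, the global pointer is cleared so new notifications stop finding it, and cancel_delayed_work_sync() waits out any pass that is still running before the caller frees the device. The synchronize_rcu() between the two is an assumption about how in-flight readers of pr_dev_info are drained.

void page_reporting_unregister(struct page_reporting_dev_info *prdev)
{
        mutex_lock(&page_reporting_mutex);

        if (rcu_access_pointer(pr_dev_info) == prdev) {
                /* Hide the device from the notification path. */
                RCU_INIT_POINTER(pr_dev_info, NULL);
                /* Wait for in-flight rcu_dereference() users (assumed). */
                synchronize_rcu();

                /* Make sure no reporting pass is still using prdev. */
                cancel_delayed_work_sync(&prdev->work);
        }

        mutex_unlock(&page_reporting_mutex);
}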