Lines matching references to ndd (struct nvdimm_drvdata); usage-pattern sketches follow the listing below.

273 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
281 for_each_dpa_resource(ndd, res)
291 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
296 if (!nsblk->uuid || !nsblk->lbasize || !ndd)
301 for_each_dpa_resource(ndd, res) {
320 for_each_dpa_resource(ndd, res)
426 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
434 for_each_dpa_resource(ndd, res)
443 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
444 nvdimm_free_dpa(ndd, res);
461 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
501 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
513 res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
517 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
525 * @ndd: dimm device data for debug
538 static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
600 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
605 for_each_dpa_resource(ndd, res)
614 for_each_dpa_resource(ndd, res) {
631 space_valid(nd_region, ndd, label_id, NULL, next, exist,
642 space_valid(nd_region, ndd, label_id, res, next, exist,
653 space_valid(nd_region, ndd, label_id, res, next, exist,
701 new_res = nvdimm_allocate_dpa(ndd, label_id,
716 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
741 if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
749 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
755 for_each_dpa_resource(ndd, res) {
765 nvdimm_free_dpa(ndd, next);
767 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
817 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
820 for_each_dpa_resource_safe(ndd, res, _res)
822 nvdimm_free_dpa(ndd, res);
913 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
917 if (!ndd) {
925 for_each_dpa_resource(ndd, res)
956 struct nvdimm_drvdata *ndd;
999 ndd = to_ndd(nd_mapping);
1005 if (!ndd)
1008 allocated += nvdimm_allocated_dpa(ndd, &label_id);
1221 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1225 for_each_dpa_resource(ndd, res)
1404 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1407 for_each_dpa_resource(ndd, res)
1425 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1432 if (!ndd) {
1437 nsindex = to_namespace_index(ndd, ndd->ns_current);
1843 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1864 if (namespace_label_has(ndd, type_guid)
1867 dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
1874 dev_dbg(ndd->dev, "duplicate entry for uuid\n");
1900 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1933 dev_name(ndd->dev), nd_label->uuid);
2028 struct nvdimm_drvdata *ndd;
2049 ndd = to_ndd(nd_mapping);
2050 if (namespace_label_has(ndd, abstraction_guid))
2081 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
2094 for_each_dpa_resource(ndd, res)
2223 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2238 res = nsblk_add_resource(nd_region, ndd,
2243 nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
2262 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2268 if (namespace_label_has(ndd, type_guid)) {
2270 dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
2277 dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
2294 if (namespace_label_has(ndd, abstraction_guid))
2306 res = nsblk_add_resource(nd_region, ndd, nsblk,
2310 nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
2385 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2388 nsindex = to_namespace_index(ndd, ndd->ns_current);
2514 struct nvdimm_drvdata *ndd = nd_mapping->ndd;
2521 put_ndd(ndd);
2522 nd_mapping->ndd = NULL;
2523 if (ndd)
2534 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2543 if (!ndd) {
2558 nd_mapping->ndd = ndd;
2560 get_ndd(ndd);
2562 count = nd_label_active_count(ndd);
2563 dev_dbg(ndd->dev, "count: %d\n", count);
2572 label = nd_label_active(ndd, j);
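
Most of the matches above are one idiom: walk the DIMM's device-physical-address (DPA) resources hanging off ndd->dpa (the ndd->dpa.child test at line 741 implies that root) and act on the entries whose resource name matches a label id. A minimal sketch of that walk, in the style of the nvdimm_allocated_dpa() call at line 1008 -- the helper name here is hypothetical and the iterator is assumed to come from the libnvdimm headers:

	/*
	 * Sketch only: per-label DPA accounting modeled on the call sites
	 * above; label_allocated_dpa() is a hypothetical name.
	 */
	static resource_size_t label_allocated_dpa(struct nvdimm_drvdata *ndd,
			struct nd_label_id *label_id)
	{
		resource_size_t allocated = 0;
		struct resource *res;

		/* each DPA allocation is named after the label id it backs */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				allocated += resource_size(res);

		return allocated;
	}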
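
One match uses the _safe iterator (for_each_dpa_resource_safe at line 820) because entries are freed mid-walk. A hedged sketch of that teardown pattern -- release_matching_dpa() is a hypothetical name, and nvdimm_free_dpa() is assumed to unlink and free the entry the loop currently points at, which is why the _safe form is needed:

	/* Sketch only: drop every DPA resource still carrying a given name. */
	static void release_matching_dpa(struct nvdimm_drvdata *ndd,
			const char *name)
	{
		struct resource *res, *_res;

		/* _safe variant: nvdimm_free_dpa() removes the current entry */
		for_each_dpa_resource_safe(ndd, res, _res)
			if (strcmp(res->name, name) == 0)
				nvdimm_free_dpa(ndd, res);
	}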
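
The matches from line 2514 on show the lifetime rule for the cached pointer: nd_mapping->ndd is set from to_ndd() and pinned with get_ndd() while the active labels are scanned, then dropped with put_ndd() on release (the release path calls put_ndd() before its NULL check, which suggests put_ndd() tolerates NULL). A sketch of that pairing, with hypothetical function names:

	/* Sketch only: acquire side, roughly what the init path above does. */
	static int mapping_scan_labels(struct nd_mapping *nd_mapping)
	{
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		int count, j;

		if (!ndd)
			return -ENXIO;	/* no label data from this DIMM */

		nd_mapping->ndd = ndd;
		get_ndd(ndd);		/* hold ndd for the mapping's lifetime */

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label = nd_label_active(ndd, j);

			/* ... record the active label against this mapping ... */
		}
		return 0;
	}

	/* Sketch only: release side, mirroring the put_ndd() matches above. */
	static void mapping_drop_labels(struct nd_mapping *nd_mapping)
	{
		struct nvdimm_drvdata *ndd = nd_mapping->ndd;

		put_ndd(ndd);
		nd_mapping->ndd = NULL;
	}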