Lines matching defs:ppl_conf in drivers/md/raid5-ppl.c (the MD RAID5 Partial Parity Log)

58  * grouped in child_logs array in struct ppl_conf, which is assigned to
87 struct ppl_conf {
116 struct ppl_conf *ppl_conf; /* shared between all log instances */
235 struct ppl_conf *ppl_conf = log->ppl_conf;
240 io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
259 pplhdr->signature = cpu_to_le32(ppl_conf->signature);
261 io->seq = atomic64_add_return(1, &ppl_conf->seq);
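
Lines 235-261 above are from ppl_new_iounit(): each stripe write gets an io unit from a preallocated mempool with GFP_NOWAIT so the submission path never sleeps, the PPL header is stamped with the array-wide signature, and a shared 64-bit counter hands out ordering sequence numbers. Below is a minimal userspace sketch of the same allocate-and-stamp pattern; malloc() stands in for the mempool, and the names (hdr, iounit, new_iounit) are illustrative, not kernel identifiers.

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct hdr { uint32_t signature; };            /* stand-in for struct ppl_header */
struct iounit { struct hdr pplhdr; uint64_t seq; };

static uint32_t conf_signature;                /* ppl_conf->signature analogue */
static atomic_uint_fast64_t conf_seq;          /* ppl_conf->seq analogue */

static struct iounit *new_iounit(void)
{
	struct iounit *io = malloc(sizeof(*io));

	if (!io)
		return NULL;  /* kernel: GFP_NOWAIT failed, caller defers the stripe */
	io->pplhdr.signature = conf_signature;
	io->seq = atomic_fetch_add(&conf_seq, 1) + 1;  /* atomic64_add_return() analogue */
	return io;
}
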
363 struct ppl_conf *ppl_conf = conf->log_private;
374 log = &ppl_conf->child_logs[sh->pd_idx];
388 spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
389 list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
390 spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
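
Lines 363-390 are ppl_write_stripe(): the child log is selected by the stripe's parity-disk index (child_logs[sh->pd_idx]), and when the non-blocking io-unit allocation fails the stripe is parked on no_mem_stripes under a spinlock rather than failing the write. A sketch of that defer-on-failure pattern using a mutex and an intrusive singly linked list; struct stripe, defer_stripe() and the lock name are invented for the example.

#include <pthread.h>
#include <stddef.h>

struct stripe {
	struct stripe *next;  /* stand-in for sh->log_list */
};

static pthread_mutex_t no_mem_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stripe *no_mem_head;
static struct stripe **no_mem_tail = &no_mem_head;

/* Park a stripe whose log io unit could not be allocated; it is
 * retried when an in-flight io unit completes (see further below). */
static void defer_stripe(struct stripe *sh)
{
	sh->next = NULL;
	pthread_mutex_lock(&no_mem_lock);
	*no_mem_tail = sh;
	no_mem_tail = &sh->next;
	pthread_mutex_unlock(&no_mem_lock);
}
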
402 struct ppl_conf *ppl_conf = log->ppl_conf;
408 md_error(ppl_conf->mddev, log->rdev);
431 struct ppl_conf *ppl_conf = log->ppl_conf;
452 ilog2(ppl_conf->block_size >> 9));
482 if ((ppl_conf->child_logs[i].wb_cache_on) &&
497 &ppl_conf->bs);
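
Lines 431-497 are from ppl_submit_iounit(), which builds and submits the actual log write; note the ilog2(ppl_conf->block_size >> 9) shift, which converts 512-byte sector numbers into PPL-block units for the header entries. A small sketch of that conversion, assuming block_size is a power of two of at least 512; ilog2_u32() and sector_to_ppl_block() are illustrative names.

#include <stdint.h>

/* Portable ilog2() stand-in for power-of-two values. */
static unsigned ilog2_u32(uint32_t v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* With a 4096-byte block size the shift is ilog2(4096 >> 9) = 3,
 * so sector 80 becomes PPL block 10. */
static uint64_t sector_to_ppl_block(uint64_t sector, uint32_t block_size)
{
	return sector >> ilog2_u32(block_size >> 9);
}
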
534 struct ppl_conf *ppl_conf = conf->log_private;
538 for (i = 0; i < ppl_conf->count; i++) {
539 log = &ppl_conf->child_logs[i];
550 struct ppl_conf *ppl_conf = log->ppl_conf;
551 struct r5conf *conf = ppl_conf->mddev->private;
562 mempool_free(io, &ppl_conf->io_pool);
564 spin_lock(&ppl_conf->no_mem_stripes_lock);
565 if (!list_empty(&ppl_conf->no_mem_stripes)) {
568 sh = list_first_entry(&ppl_conf->no_mem_stripes,
574 spin_unlock(&ppl_conf->no_mem_stripes_lock);
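
Lines 550-574 are the counterpart, ppl_io_unit_finished(): a completed io unit goes back to the mempool, and one parked stripe (if any) is pulled off no_mem_stripes and rescheduled. Continuing the defer-list sketch above:

/* On io-unit completion, take one deferred stripe off the list; the
 * caller re-runs the submission path on it, as the kernel does by
 * setting STRIPE_HANDLE and releasing the stripe. */
static struct stripe *pop_deferred_stripe(void)
{
	struct stripe *sh;

	pthread_mutex_lock(&no_mem_lock);
	sh = no_mem_head;
	if (sh) {
		no_mem_head = sh->next;
		if (!no_mem_head)
			no_mem_tail = &no_mem_head;
	}
	pthread_mutex_unlock(&no_mem_lock);
	return sh;
}
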
585 struct ppl_conf *ppl_conf = log->ppl_conf;
586 struct r5conf *conf = ppl_conf->mddev->private;
611 struct ppl_conf *ppl_conf = log->ppl_conf;
612 struct r5conf *conf = ppl_conf->mddev->private;
634 GFP_NOIO, &ppl_conf->flush_bs);
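
Lines 585-634 cover the flush path (ppl_do_flush()): flush bios are allocated with GFP_NOIO from the dedicated flush_bs bioset so the path can make progress under memory pressure, and they target member disks whose volatile write cache is enabled. As a loose userspace analogue only, with fsync() standing in for a flush bio and fds[] for the member rdevs:

#include <unistd.h>

/* Force volatile write caches down on every member device; a rough
 * stand-in for ppl_do_flush() submitting flush requests. */
static int flush_members(const int *fds, int count)
{
	int err = 0;

	for (int i = 0; i < count; i++)
		if (fsync(fds[i]) != 0)
			err = -1;
	return err;
}
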
666 struct ppl_conf *ppl_conf = conf->log_private;
670 for (i = 0; i < ppl_conf->count; i++) {
671 struct ppl_log *log = &ppl_conf->child_logs[i];
789 struct ppl_conf *ppl_conf = log->ppl_conf;
790 struct mddev *mddev = ppl_conf->mddev;
792 int block_size = ppl_conf->block_size;
968 struct ppl_conf *ppl_conf = log->ppl_conf;
1023 ppl_conf->mismatch_count++;
1028 ppl_conf->recovered_entries++;
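
Lines 789-1028 are recovery: ppl_recover() walks the header entries of each child log, replays the ones whose checksums still match (recovered_entries) and counts the rest in mismatch_count. The identity it relies on is that PPL stores partial parity, the XOR of the data chunks not touched by the write, so XORing it with the chunks that were written reproduces the full parity block. A simplified sketch of that reconstruction; the flat buffers here ignore the real per-sector stripe geometry.

#include <stddef.h>
#include <stdint.h>

/* parity = partial_parity XOR (all chunks covered by the logged write). */
static void recover_parity(uint8_t *parity, const uint8_t *partial_parity,
			   const uint8_t *const *written, int nwritten,
			   size_t len)
{
	for (size_t i = 0; i < len; i++) {
		uint8_t p = partial_parity[i];

		for (int d = 0; d < nwritten; d++)
			p ^= written[d][i];
		parity[i] = p;
	}
}
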
1060 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1076 struct ppl_conf *ppl_conf = log->ppl_conf;
1133 ppl_conf->signature = signature;
1134 } else if (ppl_conf->signature != signature) {
1136 __func__, signature, ppl_conf->signature,
1165 ppl_conf->mismatch_count++;
1183 __func__, ret, ppl_conf->mismatch_count,
1184 ppl_conf->recovered_entries);
1188 static int ppl_load(struct ppl_conf *ppl_conf)
1195 for (i = 0; i < ppl_conf->count; i++) {
1196 struct ppl_log *log = &ppl_conf->child_logs[i];
1211 if (ppl_conf->mddev->external) {
1213 signature = ppl_conf->signature;
1215 } else if (signature != ppl_conf->signature) {
1217 mdname(ppl_conf->mddev));
1225 __func__, ret, ppl_conf->mismatch_count,
1226 ppl_conf->recovered_entries);
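
Lines 1060-1226 are header loading: ppl_load_distributed() adopts the first signature it sees and flags any later header that disagrees, while ppl_load() additionally requires all child logs to agree, except on external-metadata arrays where the expected signature was already supplied from user space (e.g. by mdadm). A compact sketch of that adopt-then-compare check; check_signature() is an invented helper, not the kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int check_signature(uint32_t hdr_sig, uint32_t *expected, bool *expected_set)
{
	if (!*expected_set) {
		*expected = hdr_sig;   /* first header establishes the value */
		*expected_set = true;
		return 0;
	}
	if (hdr_sig != *expected) {
		fprintf(stderr, "PPL signature mismatch: 0x%08x != 0x%08x\n",
			hdr_sig, *expected);
		return -1;             /* kernel counts this in mismatch_count */
	}
	return 0;
}
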
1230 static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1232 clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1233 clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1235 kfree(ppl_conf->child_logs);
1237 bioset_exit(&ppl_conf->bs);
1238 bioset_exit(&ppl_conf->flush_bs);
1239 mempool_exit(&ppl_conf->io_pool);
1240 kmem_cache_destroy(ppl_conf->io_kc);
1242 kfree(ppl_conf);
1247 struct ppl_conf *ppl_conf = conf->log_private;
1249 if (ppl_conf) {
1250 __ppl_exit_log(ppl_conf);
1308 &log->ppl_conf->mddev->flags);
1323 struct ppl_conf *ppl_conf;
1361 ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1362 if (!ppl_conf)
1365 ppl_conf->mddev = mddev;
1367 ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1368 if (!ppl_conf->io_kc) {
1373 ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
1374 ppl_io_pool_free, ppl_conf->io_kc);
1378 ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
1382 ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
1386 ppl_conf->count = conf->raid_disks;
1387 ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1389 if (!ppl_conf->child_logs) {
1394 atomic64_set(&ppl_conf->seq, 0);
1395 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1396 spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1399 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1400 ppl_conf->block_size = 512;
1402 ppl_conf->block_size = queue_logical_block_size(mddev->queue);
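
Lines 1399-1402 show ppl_init_log() deriving the array signature as the bit-inverted CRC32C of the md UUID and choosing the PPL block size (512 by default, or the queue's logical block size). The same computation in a dependency-free sketch; the bitwise loop is a deliberately slow stand-in for the kernel's crc32c_le().

#include <stddef.h>
#include <stdint.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
static uint32_t crc32c_le(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

static uint32_t ppl_signature(const uint8_t uuid[16])
{
	return ~crc32c_le(~0u, uuid, 16);  /* matches line 1399 above */
}
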
1405 for (i = 0; i < ppl_conf->count; i++) {
1406 struct ppl_log *log = &ppl_conf->child_logs[i];
1415 log->ppl_conf = ppl_conf;
1428 ret = ppl_load(ppl_conf);
1433 ppl_conf->recovered_entries > 0 &&
1434 ppl_conf->mismatch_count == 0) {
1441 } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
1447 conf->log_private = ppl_conf;
1448 set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1452 __ppl_exit_log(ppl_conf);
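
Lines 1361-1452 are the body of ppl_init_log(): everything hangs off one kzalloc'ed ppl_conf, and every error branch funnels into the same __ppl_exit_log() used at shutdown. That works because bioset_exit(), mempool_exit(), kmem_cache_destroy() and kfree() all tolerate members that are still zero/NULL. A userspace sketch of the single-teardown-path pattern, with malloc()/free() standing in for the kernel allocators and placeholder sizes:

#include <stdlib.h>

struct conf { void *io_kc, *io_pool, *bs, *child_logs; };

/* One exit path serves both init failure and normal shutdown, because
 * free(NULL) is a no-op, just like the kernel *_exit() helpers on a
 * zeroed structure. */
static void conf_exit(struct conf *c)
{
	free(c->child_logs);
	free(c->bs);
	free(c->io_pool);
	free(c->io_kc);
	free(c);
}

static struct conf *conf_init(void)
{
	struct conf *c = calloc(1, sizeof(*c));  /* kzalloc() analogue */

	if (!c)
		return NULL;
	if (!(c->io_kc = malloc(64)) || !(c->io_pool = malloc(64)) ||
	    !(c->bs = malloc(64)) || !(c->child_logs = malloc(64))) {
		conf_exit(c);  /* safe: untouched members are still NULL */
		return NULL;
	}
	return c;
}
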
1458 struct ppl_conf *ppl_conf = conf->log_private;
1472 if (rdev->raid_disk >= ppl_conf->count)
1475 log = &ppl_conf->child_logs[rdev->raid_disk];
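
Lines 1458-1475 are from ppl_modify_log(), which handles member disk changes: rdev->raid_disk is bounds-checked against ppl_conf->count before child_logs is indexed, since a disk outside the array geometry has no child log. A minimal guarded-lookup sketch with stand-in types:

#include <stddef.h>

struct child_log { int unused; };
struct conf_like { int count; struct child_log *child_logs; };

static struct child_log *get_child_log(struct conf_like *c, int raid_disk)
{
	if (raid_disk < 0 || raid_disk >= c->count)
		return NULL;  /* e.g. a disk not (yet) part of the geometry */
	return &c->child_logs[raid_disk];
}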