Lines matching defs:ppl_conf (drivers/md/raid5-ppl.c)

58  * grouped in child_logs array in struct ppl_conf, which is assigned to
87 struct ppl_conf {
116 struct ppl_conf *ppl_conf; /* shared between all log instances */
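Taken together, the matches below pin down most of the structure's layout. The following is a sketch of struct ppl_conf reconstructed only from the fields referenced in this listing; member order, comments, and any members not matched here are assumptions, not the verbatim kernel definition.

/* struct ppl_conf as implied by the matches in this listing; member
 * order and omitted fields are assumptions */
struct ppl_conf {
        struct mddev *mddev;            /* owning array (line 1376) */

        struct ppl_log *child_logs;     /* one log per member disk (1398) */
        int count;                      /* number of child logs (1397) */

        u32 signature;                  /* CRC32C of mddev->uuid (1411) */
        int block_size;                 /* logical block size (1412, 1414) */
        atomic64_t seq;                 /* log sequence number (1405) */

        struct kmem_cache *io_kc;       /* ppl_io_unit slab (1378) */
        mempool_t io_pool;              /* io unit mempool (1384) */
        struct bio_set bs;              /* bioset for PPL writes (1389) */
        struct bio_set flush_bs;        /* bioset for flush bios (1393) */

        /* stripes parked when no io unit could be allocated (388) */
        struct list_head no_mem_stripes;
        spinlock_t no_mem_stripes_lock;

        unsigned short write_hint;      /* RWH_* hint for PPL bios (1408) */

        /* recovery statistics (1028, 1033) */
        int mismatch_count;
        int recovered_entries;
};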
235 struct ppl_conf *ppl_conf = log->ppl_conf;
240 io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
258 pplhdr->signature = cpu_to_le32(ppl_conf->signature);
260 io->seq = atomic64_add_return(1, &ppl_conf->seq);
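The allocation path at lines 235-260 stamps each new io unit with the array-wide signature and a lock-free sequence number. A kernel-style sketch of that pattern follows; the function shape and the header_page/generation fields are assumptions, only the three matched lines are taken from this listing.

/* sketch of io-unit setup (lines 235-260); names not shown in the
 * listing are assumptions */
static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct ppl_io_unit *io;
        struct ppl_header *pplhdr;

        /* GFP_NOWAIT: may fail under memory pressure; the caller parks
         * the stripe on no_mem_stripes instead of blocking */
        io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
        if (!io)
                return NULL;

        pplhdr = page_address(io->header_page);         /* assumed field */
        pplhdr->signature = cpu_to_le32(ppl_conf->signature);

        /* atomic64_add_return() yields a sequence number that is unique
         * across all child logs without taking a lock */
        io->seq = atomic64_add_return(1, &ppl_conf->seq);
        pplhdr->generation = cpu_to_le64(io->seq);      /* assumed field */

        return io;
}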
362 struct ppl_conf *ppl_conf = conf->log_private;
373 log = &ppl_conf->child_logs[sh->pd_idx];
387 spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
388 list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
389 spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
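Lines 362-389 show the submission side: the stripe's parity-disk index selects the per-disk child log (PPL is distributed, so each disk logs the partial parity of stripes whose parity it holds), and if no io unit can be allocated the stripe is parked rather than blocked. A hedged sketch; log->io_mutex and ppl_log_stripe() are assumed names not present in this listing:

/* sketch of the submission path (lines 362-389) */
int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_log *log;

        /* the log for this stripe lives on its parity disk */
        log = &ppl_conf->child_logs[sh->pd_idx];

        mutex_lock(&log->io_mutex);             /* assumed lock */
        if (ppl_log_stripe(log, sh)) {          /* assumed helper */
                /* GFP_NOWAIT allocation failed: park the stripe and
                 * retry when an io unit is returned to the pool */
                spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
                list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
                spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
        }
        mutex_unlock(&log->io_mutex);

        return 0;
}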
401 struct ppl_conf *ppl_conf = log->ppl_conf;
407 md_error(ppl_conf->mddev, log->rdev);
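Lines 401-407 are the error half of the write-completion handler: a failed PPL write is escalated to the MD core, which marks the member disk Faulty. A sketch, with everything except the two matched lines assumed:

static void ppl_log_endio(struct bio *bio)
{
        struct ppl_io_unit *io = bio->bi_private;       /* assumed */
        struct ppl_log *log = io->log;                  /* assumed field */
        struct ppl_conf *ppl_conf = log->ppl_conf;

        if (bio->bi_status)
                md_error(ppl_conf->mddev, log->rdev);

        /* completion of the logged stripes omitted here */
}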
432 struct ppl_conf *ppl_conf = log->ppl_conf;
453 ilog2(ppl_conf->block_size >> 9));
472 bio->bi_write_hint = ppl_conf->write_hint;
486 if ((ppl_conf->child_logs[i].wb_cache_on) &&
500 &ppl_conf->bs);
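Three details of bio submission are visible in the 432-500 range: header sizes are converted between 512-byte sectors and logical-block units (line 453), the configurable write hint is copied into each PPL bio (line 472), and a disk-cache flush is only considered for members whose write-back cache is on (line 486). The shift at line 453 works because both unit sizes are powers of two: for block_size = 4096, block_size >> 9 = 8 sectors per block and ilog2(8) = 3. A minimal illustration, assuming (as the header fields suggest) that on-disk sizes are in logical blocks while in-memory counts are in sectors; the helper name is hypothetical:

static inline u32 ppl_sectors_to_blocks(u32 sectors, int block_size)
{
        /* e.g. 4096-byte blocks: 4096 >> 9 == 8, ilog2(8) == 3 */
        return sectors >> ilog2(block_size >> 9);
}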
540 struct ppl_conf *ppl_conf = conf->log_private;
544 for (i = 0; i < ppl_conf->count; i++) {
545 log = &ppl_conf->child_logs[i];
556 struct ppl_conf *ppl_conf = log->ppl_conf;
557 struct r5conf *conf = ppl_conf->mddev->private;
568 mempool_free(io, &ppl_conf->io_pool);
570 spin_lock(&ppl_conf->no_mem_stripes_lock);
571 if (!list_empty(&ppl_conf->no_mem_stripes)) {
574 sh = list_first_entry(&ppl_conf->no_mem_stripes,
580 spin_unlock(&ppl_conf->no_mem_stripes_lock);
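Lines 556-580 are the matching release side of the no_mem_stripes mechanism: once an io unit goes back into the pool, one parked stripe is pulled off the list and re-queued. A sketch; the STRIPE_HANDLE/raid5_release_stripe steps are assumptions based on how MD normally re-queues stripes, only the matched lines are certain:

static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private; /* line 557; used
                                                         * by an omitted
                                                         * wake-up step */

        mempool_free(io, &ppl_conf->io_pool);

        spin_lock(&ppl_conf->no_mem_stripes_lock);
        if (!list_empty(&ppl_conf->no_mem_stripes)) {
                struct stripe_head *sh;

                sh = list_first_entry(&ppl_conf->no_mem_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);     /* assumed */
                raid5_release_stripe(sh);               /* assumed */
        }
        spin_unlock(&ppl_conf->no_mem_stripes_lock);
}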
591 struct ppl_conf *ppl_conf = log->ppl_conf;
592 struct r5conf *conf = ppl_conf->mddev->private;
618 struct ppl_conf *ppl_conf = log->ppl_conf;
619 struct r5conf *conf = ppl_conf->mddev->private;
640 bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
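Line 640 allocates flush bios from the dedicated flush_bs bioset with zero bio vecs, since a flush carries no data pages. A sketch of the likely surrounding setup; the end_io name and the exact flag combination are assumptions:

        bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
        bio_set_dev(bio, bdev);                 /* member disk's bdev */
        bio->bi_private = io;
        bio->bi_end_io = ppl_flush_endio;       /* assumed name */
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; /* empty flush */
        submit_bio(bio);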
675 struct ppl_conf *ppl_conf = conf->log_private;
679 for (i = 0; i < ppl_conf->count; i++) {
680 struct ppl_log *log = &ppl_conf->child_logs[i];
798 struct ppl_conf *ppl_conf = log->ppl_conf;
799 struct mddev *mddev = ppl_conf->mddev;
801 int block_size = ppl_conf->block_size;
973 struct ppl_conf *ppl_conf = log->ppl_conf;
1028 ppl_conf->mismatch_count++;
1033 ppl_conf->recovered_entries++;
1065 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1081 struct ppl_conf *ppl_conf = log->ppl_conf;
1138 ppl_conf->signature = signature;
1139 } else if (ppl_conf->signature != signature) {
1141 __func__, signature, ppl_conf->signature,
1172 ppl_conf->mismatch_count++;
1190 __func__, ret, ppl_conf->mismatch_count,
1191 ppl_conf->recovered_entries);
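Two counters summarize recovery: mismatch_count is bumped when a log header or entry fails its checksum (lines 1028, 1172), recovered_entries when an entry's partial parity is successfully replayed (line 1033), and both are reported when per-disk loading finishes (lines 1190-1191). A mismatched entry is skipped rather than aborting recovery of the remaining entries. A hypothetical helper condensing that bookkeeping:

/* condensed, hypothetical form of the per-entry accounting at
 * lines 1028/1033 */
static void ppl_account_entry(struct ppl_conf *ppl_conf, bool crc_ok)
{
        if (!crc_ok)
                ppl_conf->mismatch_count++;     /* bad entry: skip it */
        else
                ppl_conf->recovered_entries++;  /* entry replayed */
}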
1195 static int ppl_load(struct ppl_conf *ppl_conf)
1202 for (i = 0; i < ppl_conf->count; i++) {
1203 struct ppl_log *log = &ppl_conf->child_logs[i];
1218 if (ppl_conf->mddev->external) {
1220 signature = ppl_conf->signature;
1222 } else if (signature != ppl_conf->signature) {
1224 mdname(ppl_conf->mddev));
1232 __func__, ret, ppl_conf->mismatch_count,
1233 ppl_conf->recovered_entries);
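ppl_load() (lines 1195-1233) iterates the child logs and, for arrays with external metadata, additionally requires every member disk to carry the same signature, since there is no superblock to validate it against. A sketch assembled from the matched lines; the signature_set flag, the skip of missing rdevs, and the message text are assumptions:

static int ppl_load(struct ppl_conf *ppl_conf)
{
        u32 signature = 0;
        bool signature_set = false;     /* assumed */
        int ret = 0;
        int i;

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];

                if (!log->rdev)         /* assumed: skip missing disk */
                        continue;

                ret = ppl_load_distributed(log);
                if (ret)
                        break;

                /* external metadata: the signature cannot be checked
                 * against a superblock, but it must be identical on
                 * every member disk */
                if (ppl_conf->mddev->external) {
                        if (!signature_set) {
                                signature = ppl_conf->signature;
                                signature_set = true;
                        } else if (signature != ppl_conf->signature) {
                                pr_warn("PPL signatures differ on %s\n",
                                        mdname(ppl_conf->mddev));
                                ret = -EINVAL;
                                break;
                        }
                }
        }

        pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
                 __func__, ret, ppl_conf->mismatch_count,
                 ppl_conf->recovered_entries);
        return ret;
}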
1237 static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1239 clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1240 clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1242 kfree(ppl_conf->child_logs);
1244 bioset_exit(&ppl_conf->bs);
1245 bioset_exit(&ppl_conf->flush_bs);
1246 mempool_exit(&ppl_conf->io_pool);
1247 kmem_cache_destroy(ppl_conf->io_kc);
1249 kfree(ppl_conf);
1254 struct ppl_conf *ppl_conf = conf->log_private;
1256 if (ppl_conf) {
1257 __ppl_exit_log(ppl_conf);
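The matches at lines 1237-1249 are dense enough to reconstruct the whole teardown helper; assembled below (only blank lines and the braces are added), it shows the release order: feature flags first, then the per-disk array, then the shared biosets, pool and slab, and finally the structure itself:

static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
        clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
        clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

        kfree(ppl_conf->child_logs);

        bioset_exit(&ppl_conf->bs);
        bioset_exit(&ppl_conf->flush_bs);
        mempool_exit(&ppl_conf->io_pool);
        kmem_cache_destroy(ppl_conf->io_kc);

        kfree(ppl_conf);
}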
1318 &log->ppl_conf->mddev->flags);
1334 struct ppl_conf *ppl_conf;
1372 ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1373 if (!ppl_conf)
1376 ppl_conf->mddev = mddev;
1378 ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1379 if (!ppl_conf->io_kc) {
1384 ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
1385 ppl_io_pool_free, ppl_conf->io_kc);
1389 ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
1393 ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
1397 ppl_conf->count = conf->raid_disks;
1398 ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1400 if (!ppl_conf->child_logs) {
1405 atomic64_set(&ppl_conf->seq, 0);
1406 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1407 spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1408 ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
1411 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1412 ppl_conf->block_size = 512;
1414 ppl_conf->block_size = queue_logical_block_size(mddev->queue);
1417 for (i = 0; i < ppl_conf->count; i++) {
1418 struct ppl_log *log = &ppl_conf->child_logs[i];
1425 log->ppl_conf = ppl_conf;
1438 ret = ppl_load(ppl_conf);
1443 ppl_conf->recovered_entries > 0 &&
1444 ppl_conf->mismatch_count == 0) {
1451 } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
1457 conf->log_private = ppl_conf;
1458 set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1462 __ppl_exit_log(ppl_conf);
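Lines 1334-1462 cover ppl_init_log(), and most of its skeleton can be assembled from them. The sketch below compresses the error handling into a single label (a simplification: the real unwind order is not visible here, only that line 1462 funnels failures into __ppl_exit_log()); the recovery_cp handling hinted at by lines 1443-1451 is reduced to comments:

static int ppl_init_log(struct r5conf *conf)
{
        struct mddev *mddev = conf->mddev;
        struct ppl_conf *ppl_conf;
        int ret, i;

        ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
        if (!ppl_conf)
                return -ENOMEM;
        ppl_conf->mddev = mddev;

        /* slab, mempool and biosets sized to one unit per member disk */
        ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
        if (!ppl_conf->io_kc) {
                ret = -ENOMEM;
                goto err;
        }
        ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks,
                           ppl_io_pool_alloc, ppl_io_pool_free,
                           ppl_conf->io_kc);
        if (ret)
                goto err;
        ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0,
                          BIOSET_NEED_BVECS);
        if (ret)
                goto err;
        ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
        if (ret)
                goto err;

        ppl_conf->count = conf->raid_disks;
        ppl_conf->child_logs = kcalloc(ppl_conf->count,
                                       sizeof(struct ppl_log), GFP_KERNEL);
        if (!ppl_conf->child_logs) {
                ret = -ENOMEM;
                goto err;
        }

        atomic64_set(&ppl_conf->seq, 0);
        INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
        spin_lock_init(&ppl_conf->no_mem_stripes_lock);
        ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;

        /* the signature ties on-disk PPL headers to this array's uuid */
        ppl_conf->signature = ~crc32c_le(~0, mddev->uuid,
                                         sizeof(mddev->uuid));
        ppl_conf->block_size = 512;
        if (mddev->queue)
                ppl_conf->block_size =
                        queue_logical_block_size(mddev->queue);

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];

                log->ppl_conf = ppl_conf;
                /* remaining per-disk setup not matched in this listing */
        }

        ret = ppl_load(ppl_conf);
        if (ret) {
                goto err;
        } else if (!mddev->pers && ppl_conf->recovered_entries > 0 &&
                   ppl_conf->mismatch_count == 0) {
                /* dirty array fully recovered: mark it clean
                 * (lines 1443-1444; details not matched here) */
        } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
                /* refuse to enable PPL on a running array whose
                 * logs do not check out (line 1451) */
                ret = -EINVAL;
                goto err;
        }

        conf->log_private = ppl_conf;
        set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

        return 0;
err:
        __ppl_exit_log(ppl_conf);
        return ret;
}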
1468 struct ppl_conf *ppl_conf = conf->log_private;
1483 if (rdev->raid_disk >= ppl_conf->count)
1486 log = &ppl_conf->child_logs[rdev->raid_disk];
1509 struct ppl_conf *ppl_conf = NULL;
1514 ppl_conf = conf->log_private;
1515 ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
1525 struct ppl_conf *ppl_conf;
1542 ppl_conf = conf->log_private;
1543 if (!ppl_conf)
1546 ppl_conf->write_hint = new;
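Finally, lines 1509-1546 expose write_hint through sysfs: the show side prints 0 when PPL is not active, and the store side validates and applies the new hint. A sketch of the store path; the parsing and locking are assumptions, only the ppl_conf lookup and the final assignment appear in this listing:

static ssize_t
ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
{
        struct r5conf *conf;
        struct ppl_conf *ppl_conf;
        unsigned short new;
        int err = 0;

        if (kstrtou16(page, 10, &new))          /* assumed parsing */
                return -EINVAL;

        err = mddev_lock(mddev);                /* assumed locking */
        if (err)
                return err;

        conf = mddev->private;
        if (!conf) {
                err = -ENODEV;
        } else {
                ppl_conf = conf->log_private;
                if (!ppl_conf)
                        err = -EINVAL;
                else
                        ppl_conf->write_hint = new;
        }

        mddev_unlock(mddev);
        return err ?: len;
}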