Lines Matching refs:raid
545 /* create sense buffer for the raid 1/10 fp */
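The match at line 545 is from the command-frame setup path: each fast-path command gets its own slice of a shared sense buffer so a RAID 1/10 fast-path IO has somewhere to deposit check-condition data. Below is a minimal userspace sketch of that carve-out pattern; the struct, pool depth, and plain calloc() are simplified stand-ins (the driver allocates from a DMA pool), not the real layout.

    #include <stdio.h>
    #include <stdlib.h>

    #define SENSE_BUF_SIZE 96   /* assumed per-command sense length */
    #define MAX_CMDS       8    /* assumed command-pool depth */

    /* simplified stand-in for the driver's per-command descriptor */
    struct cmd_fusion {
        unsigned char *sense;   /* points into the shared sense pool */
    };

    int main(void)
    {
        /* one contiguous pool, sliced per command; the driver uses a
         * DMA pool, plain calloc() stands in here */
        unsigned char *pool = calloc(MAX_CMDS, SENSE_BUF_SIZE);
        struct cmd_fusion cmds[MAX_CMDS];

        if (!pool)
            return 1;

        for (int i = 0; i < MAX_CMDS; i++)
            cmds[i].sense = pool + (size_t)i * SENSE_BUF_SIZE;

        /* each fast-path command now owns a fixed slot for sense data */
        printf("cmd 3 sense buffer at offset %zu\n",
               (size_t)(cmds[3].sense - pool));

        free(pool);
        return 0;
    }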
1461 struct MR_LD_RAID *raid;
1497 raid = MR_LdRaidGet(i, map);
1499 ld_sync->seqNum = raid->seqNum;
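Lines 1461-1499 come from the map-sync path: for every logical drive the driver fetches its MR_LD_RAID entry with MR_LdRaidGet() and copies the sequence number into the sync payload sent to firmware. A minimal sketch of that loop, with made-up struct fields standing in for the real raid-map layout:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LD 4

    /* simplified stand-ins for MR_LD_RAID and the sync payload */
    struct mr_ld_raid { uint32_t seqNum; };
    struct ld_target_sync { uint8_t targetId; uint32_t seqNum; };

    /* hypothetical lookup standing in for MR_LdRaidGet(i, map) */
    static struct mr_ld_raid *ld_raid_get(int ld, struct mr_ld_raid *map)
    {
        return &map[ld];
    }

    int main(void)
    {
        struct mr_ld_raid map[MAX_LD] = {
            { .seqNum = 7 }, { .seqNum = 9 }, { .seqNum = 2 }, { .seqNum = 5 },
        };
        struct ld_target_sync sync[MAX_LD];

        /* per-LD copy, as in the listing:
         *   raid = MR_LdRaidGet(i, map);
         *   ld_sync->seqNum = raid->seqNum;  */
        for (int i = 0; i < MAX_LD; i++) {
            struct mr_ld_raid *raid = ld_raid_get(i, map);
            sync[i].targetId = (uint8_t)i;
            sync[i].seqNum = raid->seqNum;
        }

        for (int i = 0; i < MAX_LD; i++)
            printf("ld %d seqNum %u\n", i, (unsigned)sync[i].seqNum);
        return 0;
    }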
2353 struct MR_LD_RAID *raid;
2362 raid = MR_LdRaidGet(ld, local_map_ptr);
2363 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
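Lines 2353-2363 gate end-to-end data protection: the raid entry for the target LD is fetched and, when its ldPiMode reports controller-managed protection info (MR_PROT_INFO_TYPE_CONTROLLER), the driver takes the EEDP branch. A condensed sketch of that decision; the constant's value and the helper name are assumptions for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* assumed value; the driver defines the real constant in its headers */
    #define PROT_INFO_TYPE_CONTROLLER 0x8

    struct ld_raid {
        struct { uint8_t ldPiMode; } capability;
    };

    /* true when the controller owns protection-info generation */
    static bool controller_owns_pi(const struct ld_raid *raid)
    {
        return raid->capability.ldPiMode == PROT_INFO_TYPE_CONTROLLER;
    }

    int main(void)
    {
        struct ld_raid raid = {
            .capability = { .ldPiMode = PROT_INFO_TYPE_CONTROLLER },
        };

        if (controller_owns_pi(&raid))
            printf("controller-managed PI: set EEDP flags on the IO\n");
        else
            printf("no controller PI: issue the IO unmodified\n");
        return 0;
    }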
2546 /* if we found a stream, update the raid
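The truncated comment at line 2546 belongs to the sequential-stream detector: when an IO continues a tracked stream, the driver updates that stream's state and promotes it to most-recently-used. The toy version below shows one plausible MRU scheme over a small fixed table; the table size, fields, and promotion logic are assumptions, not the driver's actual bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_STREAMS 4

    /* simplified stream entry: next LBA the stream is expected to hit */
    struct stream { uint64_t next_lba; };

    struct stream_table {
        struct stream entry[NUM_STREAMS];
        uint8_t mru[NUM_STREAMS];   /* mru[0] is most recently used */
    };

    /* if the IO continues a known stream, update it and move it to the
     * front of the MRU order; otherwise recycle the least-recent slot */
    static void stream_update(struct stream_table *t, uint64_t lba,
                              uint32_t blocks)
    {
        int pos = NUM_STREAMS - 1;          /* default: steal the LRU slot */

        for (int i = 0; i < NUM_STREAMS; i++) {
            if (t->entry[t->mru[i]].next_lba == lba) {
                pos = i;                    /* found a continuing stream */
                break;
            }
        }

        uint8_t idx = t->mru[pos];
        t->entry[idx].next_lba = lba + blocks;

        /* promote the stream to most-recently-used */
        for (int i = pos; i > 0; i--)
            t->mru[i] = t->mru[i - 1];
        t->mru[0] = idx;
    }

    int main(void)
    {
        struct stream_table t = {
            .entry = { { UINT64_MAX }, { UINT64_MAX },
                       { UINT64_MAX }, { UINT64_MAX } },
            .mru = { 0, 1, 2, 3 },
        };

        stream_update(&t, 100, 8);   /* new stream starting at LBA 100 */
        stream_update(&t, 108, 8);   /* continues it: seen as sequential */
        printf("hottest stream next LBA: %llu\n",
               (unsigned long long)t.entry[t.mru[0]].next_lba);
        return 0;
    }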
2599 * affinity (cpu of the controller) and raid_flags in the raid context
2604 * @raid: LD raid map
2613 struct MR_LD_RAID *raid, bool fp_possible,
2622 if ((raid->cpuAffinity.pdRead.cpu0) &&
2623 (raid->cpuAffinity.pdRead.cpu1))
2625 else if (raid->cpuAffinity.pdRead.cpu1)
2628 if ((raid->cpuAffinity.pdWrite.cpu0) &&
2629 (raid->cpuAffinity.pdWrite.cpu1))
2631 else if (raid->cpuAffinity.pdWrite.cpu1)
2634 if ((raid->level <= 1) &&
2635 (raid->capability.fp_cache_bypass_capable)) {
2645 if ((raid->cpuAffinity.ldRead.cpu0) &&
2646 (raid->cpuAffinity.ldRead.cpu1))
2648 else if (raid->cpuAffinity.ldRead.cpu1)
2651 if ((raid->cpuAffinity.ldWrite.cpu0) &&
2652 (raid->cpuAffinity.ldWrite.cpu1))
2654 else if (raid->cpuAffinity.ldWrite.cpu1)
2658 ((raid->level == 5) || (raid->level == 6)) &&
2659 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
2672 if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) &&
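Lines 2599-2672 are the core of the affinity helper described by the comment at 2599: the per-LD raid map carries separate CPU-affinity hints for fast-path IO (pdRead/pdWrite) and firmware-path IO (ldRead/ldWrite), and the chosen queue plus extra raid flags (cache bypass for RAID 0/1, write-through handling for RAID 5/6) end up in the raid context. A compressed sketch of the selection ladder; the field names mirror the listing, but the surrounding types are simplified:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum cpu_sel { CPU_0, CPU_1, CPU_FCFS };   /* FCFS: let firmware pick */

    struct affinity { uint8_t cpu0 : 1, cpu1 : 1; };

    struct ld_raid {
        uint8_t level;
        struct { struct affinity pdRead, pdWrite, ldRead, ldWrite; } cpuAffinity;
    };

    /* the ladder from the listing: both bits set means either CPU may
     * take the IO (FCFS), cpu1 alone selects CPU 1, otherwise CPU 0 */
    static enum cpu_sel pick_cpu(struct affinity a)
    {
        if (a.cpu0 && a.cpu1)
            return CPU_FCFS;
        if (a.cpu1)
            return CPU_1;
        return CPU_0;
    }

    /* fast-path IO consults the pd hints, firmware-path IO the ld hints */
    static enum cpu_sel select_queue(const struct ld_raid *raid,
                                     bool fp_possible, bool is_read)
    {
        if (fp_possible)
            return pick_cpu(is_read ? raid->cpuAffinity.pdRead
                                    : raid->cpuAffinity.pdWrite);
        return pick_cpu(is_read ? raid->cpuAffinity.ldRead
                                : raid->cpuAffinity.ldWrite);
    }

    int main(void)
    {
        struct ld_raid raid = {
            .level = 1,
            .cpuAffinity = { .pdRead = { 1, 1 }, .pdWrite = { 0, 1 } },
        };

        printf("fp read  -> %d (2 = FCFS)\n",
               (int)select_queue(&raid, true, true));
        printf("fp write -> %d (1 = CPU_1)\n",
               (int)select_queue(&raid, true, false));
        return 0;
    }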
2704 struct MR_LD_RAID *raid = NULL;
2791 raid = MR_LdRaidGet(ld, local_map_ptr);
2793 if (!raid || (!fusion->fast_path_io)) {
2805 /* FP for Optimal raid level 1.
2845 /* If raid is NULL, set CPU affinity to default CPU0 */
2846 if (raid)
2848 raid, fp_possible, io_info.isRead,
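Lines 2704-2848 are from the LD read/write build path: the raid lookup can return NULL (for instance before a raid map has been fetched), which both rules out fast-path IO and skips the affinity helper so the raid context keeps its default of CPU 0. A small sketch of that guard, with a stub standing in for the helper sketched above:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ld_raid { int level; };
    struct raid_context { int cpu_sel; };   /* 0 == default CPU 0 */

    /* stub standing in for the affinity helper sketched above */
    static int select_queue(const struct ld_raid *raid, bool fp, bool is_read)
    {
        (void)raid; (void)fp; (void)is_read;
        return 1;                            /* pretend the map says CPU 1 */
    }

    static void build_ldio(const struct ld_raid *raid, bool fast_path_io,
                           bool is_read, struct raid_context *ctx)
    {
        bool fp_possible = true;

        /* as in the listing: no raid entry or no fast-path support
         * forces the firmware path */
        if (!raid || !fast_path_io)
            fp_possible = false;

        /* and the affinity hints are only consulted when raid is
         * non-NULL; otherwise the context keeps its default CPU 0 */
        if (raid)
            ctx->cpu_sel = select_queue(raid, fp_possible, is_read);
        else
            ctx->cpu_sel = 0;
    }

    int main(void)
    {
        struct raid_context ctx = { 0 };
        struct ld_raid raid = { .level = 1 };

        build_ldio(NULL, true, true, &ctx);
        printf("no raid map:   cpu_sel=%d (default CPU 0)\n", ctx.cpu_sel);

        build_ldio(&raid, true, true, &ctx);
        printf("with raid map: cpu_sel=%d\n", ctx.cpu_sel);
        return 0;
    }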
2957 struct MR_LD_RAID *raid;
2979 raid = MR_LdRaidGet(ld, local_map_ptr);
2980 if (!(raid->capability.fpNonRWCapable))
2998 pRAID_Context->config_seq_num = raid->seqNum;
3002 cpu_to_le16(raid->fpIoTimeoutForLd);
3018 memcpy(io_request->LUN, raid->LUN, 8);
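Lines 2957-3018 build a non-read/write command to a logical drive: fast-path is only attempted when the LD advertises fpNonRWCapable, and the raid context is stamped with the map's sequence number, the per-LD fast-path timeout (byte-swapped with cpu_to_le16 in the real code), and the 8-byte LUN copied from the raid entry. A condensed sketch of that stamping, with simplified structures and the endianness conversion omitted:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ld_raid {
        struct { uint8_t fpNonRWCapable : 1; } capability;
        uint32_t seqNum;
        uint16_t fpIoTimeoutForLd;
        uint8_t  LUN[8];
    };

    struct io_request { uint8_t LUN[8]; };
    struct raid_context { uint32_t config_seq_num; uint16_t timeout; };

    /* returns true when the command may take the fast path */
    static bool build_ld_nonrw(const struct ld_raid *raid,
                               struct io_request *req,
                               struct raid_context *ctx)
    {
        if (!raid->capability.fpNonRWCapable)
            return false;                    /* firmware path instead */

        ctx->config_seq_num = raid->seqNum;  /* lets firmware spot stale maps */
        ctx->timeout = raid->fpIoTimeoutForLd;
        memcpy(req->LUN, raid->LUN, 8);      /* address the LD directly */
        return true;
    }

    int main(void)
    {
        struct ld_raid raid = {
            .capability = { .fpNonRWCapable = 1 },
            .seqNum = 42, .fpIoTimeoutForLd = 30,
            .LUN = { 0, 1 },
        };
        struct io_request req = { { 0 } };
        struct raid_context ctx = { 0 };

        if (build_ld_nonrw(&raid, &req, &ctx))
            printf("fast-path non-RW: seq=%u timeout=%u LUN[1]=%u\n",
                   (unsigned)ctx.config_seq_num, (unsigned)ctx.timeout,
                   (unsigned)req.LUN[1]);
        return 0;
    }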
3275 * It prepares the raid 1 second IO
3373 * if it is raid 1/10 fp write capable.
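The last two matches (lines 3275 and 3373) frame the RAID 1/10 fast-path write optimization: a qualifying write is cloned into a second IO aimed at the mirror arm, so both copies are written without a firmware round-trip. A bare-bones sketch of the cloning idea; pairing the two commands through peer indices is an assumption about the mechanism, not the driver's exact bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    struct fp_cmd {
        uint16_t dev_handle;   /* physical arm this IO targets */
        uint64_t lba;
        uint32_t blocks;
        int      peer;         /* index of the paired mirror IO, -1 if none */
    };

    /* clone a RAID 1 fast-path write for the mirror arm and pair the
     * two commands so completion handling can wait for both */
    static void prepare_r1_second_io(struct fp_cmd *cmds, int first,
                                     int second, uint16_t mirror_handle)
    {
        cmds[second] = cmds[first];          /* same LBA, length, payload */
        cmds[second].dev_handle = mirror_handle;
        cmds[first].peer = second;
        cmds[second].peer = first;
    }

    int main(void)
    {
        struct fp_cmd cmds[2] = {
            { .dev_handle = 0x11, .lba = 2048, .blocks = 16, .peer = -1 },
        };

        prepare_r1_second_io(cmds, 0, 1, 0x22);
        printf("primary -> 0x%x, mirror -> 0x%x, same lba %llu\n",
               (unsigned)cmds[0].dev_handle, (unsigned)cmds[1].dev_handle,
               (unsigned long long)cmds[1].lba);
        return 0;
    }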