
Searched refs:raid (Results 101 - 125 of 132) sorted by relevance


/kernel/linux/linux-6.6/fs/btrfs/
raid56.c 11 #include <linux/raid/pq.h>
14 #include <linux/raid/xor.h>
862 * @rbio: The raid bio
2331 * raid bio are correct and not be changed during the scrub/replace. That
/kernel/linux/linux-5.10/drivers/staging/rtl8723bs/core/
rtw_xmit.c 451 pattrib->raid = psta->raid; in update_attrib_phy_info()
rtw_mlme.c 1146 psta->raid = networktype_to_raid_ex(padapter, psta); in rtw_joinbss_update_stainfo()
/kernel/linux/linux-6.6/drivers/staging/rtl8723bs/core/
rtw_xmit.c 450 pattrib->raid = psta->raid; in update_attrib_phy_info()
rtw_mlme.c 1032 psta->raid = networktype_to_raid_ex(padapter, psta); in rtw_joinbss_update_stainfo()
/kernel/linux/linux-5.10/drivers/md/
raid5-ppl.c 12 #include <linux/raid/md_p.h>
34 * stripe. The modifed raid data chunks form an m-by-n matrix, where m is the
48 * data_sector is the first raid sector of the modified data, data_size is the
90 /* array of child logs, one for each raid disk */
96 u32 signature; /* raid array identifier */
147 atomic_t pending_stripes; /* how many stripes not written to raid */
887 /* map raid sector to member disk */ in ppl_recover_entry()
942 /* map raid sector to parity disk */ in ppl_recover_entry()
1223 pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n", in ppl_load()
1281 pr_warn("md/raid in ppl_validate_rdev()
[all...]
md-cluster.c 11 #include <linux/raid/md_p.h>
62 /* dlm lock space and resources for clustered raid. */
1245 * Update the size for cluster raid is a little more complex, we perform it
raid5-cache.c 10 #include <linux/raid/md_p.h>
69 * Stripes in caching phase do not write the raid disks. Instead, all
78 * - write data and parity to raid disks
206 * first and then start move data to raid disks, there is no requirement to
217 atomic_t pending_stripe;/* how many stripes not flushed to raid */
247 IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
690 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", in r5c_disable_writeback_async()
990 * data from log to raid disks), so we shouldn't wait for reclaim here
1007 /* the stripe is written to log, we start writing it to raid */ in r5l_write_stripe()
1105 * raid disk in r5l_handle_flush_request()
[all...]
md.c 59 #include <linux/raid/md_p.h>
60 #include <linux/raid/md_u.h>
61 #include <linux/raid/detect.h>
111 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
313 .procname = "raid",
516 /* restrict memory reclaim I/O during raid array is suspend */ in mddev_suspend()
1226 pr_warn("md: invalid raid superblock magic on %s\n", b); in super_90_load()
5385 /* cluster raid doesn't support change array_sectors */ in array_size_store()
6345 * This is called from dm-raid in md_stop()
7315 /* change the number of raid disk in update_raid_disks()
[all...]
/kernel/linux/linux-5.10/drivers/scsi/esas2r/
atvda.h 930 struct atto_vda_ae_raid raid; member
esas2r_main.c 1455 struct atto_vda_ae_raid *r = &ae->raid; in esas2r_nuxi_ae_data()
/kernel/linux/linux-6.6/drivers/md/
raid5-ppl.c 12 #include <linux/raid/md_p.h>
34 * stripe. The modifed raid data chunks form an m-by-n matrix, where m is the
48 * data_sector is the first raid sector of the modified data, data_size is the
90 /* array of child logs, one for each raid disk */
96 u32 signature; /* raid array identifier */
147 atomic_t pending_stripes; /* how many stripes not written to raid */
877 /* map raid sector to member disk */ in ppl_recover_entry()
934 /* map raid sector to parity disk */ in ppl_recover_entry()
1216 pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n", in ppl_load()
1273 pr_warn("md/raid in ppl_validate_rdev()
[all...]
md-cluster.c 11 #include <linux/raid/md_p.h>
62 /* dlm lock space and resources for clustered raid. */
1249 * Update the size for cluster raid is a little more complex, we perform it
raid5-cache.c 10 #include <linux/raid/md_p.h>
69 * Stripes in caching phase do not write the raid disks. Instead, all
78 * - write data and parity to raid disks
206 * first and then start move data to raid disks, there is no requirement to
217 atomic_t pending_stripe;/* how many stripes not flushed to raid */
247 IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
690 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", in r5c_disable_writeback_async()
989 * data from log to raid disks), so we shouldn't wait for reclaim here
1006 /* the stripe is written to log, we start writing it to raid */ in r5l_write_stripe()
1104 * raid disk in r5l_handle_flush_request()
[all...]
md.c 61 #include <linux/raid/md_p.h>
62 #include <linux/raid/md_u.h>
63 #include <linux/raid/detect.h>
111 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
460 /* restrict memory reclaim I/O during raid array is suspend */ in mddev_suspend()
1184 pr_warn("md: invalid raid superblock magic on %pg\n", in super_90_load()
5358 /* cluster raid doesn't support change array_sectors */ in array_size_store()
6362 * This is called from dm-raid in md_stop()
7347 /* change the number of raid disks */ in update_raid_disks()
7973 * raid personalit in md_thread()
[all...]
dm-raid.c 20 #define DM_MSG_PREFIX "raid"
21 #define MAX_RAID_DEVICES 253 /* md-raid kernel limit */
24 * Minimum sectors of free reshape space per raid device
36 * The following flags are used by dm-raid to set up the array state.
112 * per raid level.
137 /* Valid options definitions per raid level... */
197 /* ...valid options definitions per raid level */
205 * the raid set all over again.
221 * raid set level, layout and chunk sectors backup/restore
284 /* Supported raid type
4079 module_dm(raid); global() variable
[all...]
/kernel/linux/linux-6.6/drivers/scsi/esas2r/
atvda.h 930 struct atto_vda_ae_raid raid; member
esas2r_main.c 1453 struct atto_vda_ae_raid *r = &ae->raid; in esas2r_nuxi_ae_data()
/kernel/linux/linux-5.10/drivers/scsi/megaraid/
megaraid_sas_base.c 1889 struct MR_LD_RAID *raid; in megasas_set_dynamic_target_properties() local
1906 raid = MR_LdRaidGet(ld, local_map_ptr); in megasas_set_dynamic_target_properties()
1908 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) in megasas_set_dynamic_target_properties()
1912 raid->capability.tmCapable; in megasas_set_dynamic_target_properties()
1914 if (!raid->flags.isEPD) in megasas_set_dynamic_target_properties()
5134 /* irrespective of FW raid maps, driver raid map is constant */ in megasas_update_ext_vd_details()
/kernel/linux/linux-6.6/drivers/scsi/megaraid/
megaraid_sas_base.c 1899 struct MR_LD_RAID *raid; in megasas_set_dynamic_target_properties() local
1916 raid = MR_LdRaidGet(ld, local_map_ptr); in megasas_set_dynamic_target_properties()
1918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) in megasas_set_dynamic_target_properties()
1922 raid->capability.tmCapable; in megasas_set_dynamic_target_properties()
1924 if (!raid->flags.isEPD) in megasas_set_dynamic_target_properties()
5165 /* irrespective of FW raid maps, driver raid map is constant */ in megasas_update_ext_vd_details()
/kernel/linux/linux-5.10/drivers/dma/
iop-adma.c 22 #include <linux/raid/pq.h>
bcm-sba-raid.c 50 #include <linux/raid/pq.h>
1771 .name = "bcm-sba-raid",
/kernel/linux/linux-6.6/drivers/dma/
bcm-sba-raid.c 42 #include <linux/raid/pq.h>
1763 .name = "bcm-sba-raid",
/kernel/linux/linux-5.10/drivers/staging/rtl8723bs/os_dep/
ioctl_linux.c 2807 DBG_871X("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid); in rtw_dbg_port()
2864 DBG_871X("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid); in rtw_dbg_port()
/kernel/linux/linux-5.10/drivers/staging/rtl8188eu/core/
rtw_mlme_ext.c 198 pattrib->raid = 6;/* b mode */ in update_mgntframe_attrib()
200 pattrib->raid = 5;/* a/g mode */ in update_mgntframe_attrib()
3006 /* todo: mask supportRate between AP & STA -> move to update raid */ in OnAssocReq()
