
Searched refs:md (Results 1 - 25 of 425) sorted by relevance


/kernel/linux/linux-6.6/drivers/video/fbdev/omap/
lcd_mipid.c
56 static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf, in mipid_transfer() argument
64 BUG_ON(md->spi == NULL); in mipid_transfer()
105 r = spi_sync(md->spi, &m); in mipid_transfer()
107 dev_dbg(&md->spi->dev, "spi_sync %d\n", r); in mipid_transfer()
113 static inline void mipid_cmd(struct mipid_device *md, int cmd) in mipid_cmd() argument
115 mipid_transfer(md, cmd, NULL, 0, NULL, 0); in mipid_cmd()
118 static inline void mipid_write(struct mipid_device *md, in mipid_write() argument
121 mipid_transfer(md, reg, buf, len, NULL, 0); in mipid_write()
124 static inline void mipid_read(struct mipid_device *md, in mipid_read() argument
127 mipid_transfer(md, re in mipid_read()
130 set_data_lines(struct mipid_device *md, int data_lines) set_data_lines() argument
148 send_init_string(struct mipid_device *md) send_init_string() argument
156 hw_guard_start(struct mipid_device *md, int guard_msec) hw_guard_start() argument
162 hw_guard_wait(struct mipid_device *md) hw_guard_wait() argument
172 set_sleep_mode(struct mipid_device *md, int on) set_sleep_mode() argument
194 set_display_state(struct mipid_device *md, int enabled) set_display_state() argument
203 struct mipid_device *md = to_mipid_device(panel); mipid_set_bklight_level() local
221 struct mipid_device *md = to_mipid_device(panel); mipid_get_bklight_level() local
231 struct mipid_device *md = to_mipid_device(panel); mipid_get_bklight_max() local
245 read_first_pixel(struct mipid_device *md) read_first_pixel() argument
275 struct mipid_device *md = to_mipid_device(panel); mipid_run_test() local
312 ls041y3_esd_recover(struct mipid_device *md) ls041y3_esd_recover() argument
319 ls041y3_esd_check_mode1(struct mipid_device *md) ls041y3_esd_check_mode1() argument
335 ls041y3_esd_check_mode2(struct mipid_device *md) ls041y3_esd_check_mode2() argument
371 ls041y3_esd_check(struct mipid_device *md) ls041y3_esd_check() argument
378 mipid_esd_start_check(struct mipid_device *md) mipid_esd_start_check() argument
385 mipid_esd_stop_check(struct mipid_device *md) mipid_esd_stop_check() argument
393 struct mipid_device *md = container_of(work, struct mipid_device, mipid_esd_work() local
404 struct mipid_device *md = to_mipid_device(panel); mipid_enable() local
425 struct mipid_device *md = to_mipid_device(panel); mipid_disable() local
447 panel_enabled(struct mipid_device *md) panel_enabled() argument
464 struct mipid_device *md = to_mipid_device(panel); mipid_init() local
482 struct mipid_device *md = to_mipid_device(panel); mipid_cleanup() local
513 mipid_detect(struct mipid_device *md) mipid_detect() argument
552 struct mipid_device *md; mipid_spi_probe() local
587 struct mipid_device *md = dev_get_drvdata(&spi->dev); mipid_spi_remove() local
[all...]
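
The lcd_mipid.c hits above all funnel through one pattern: every panel operation recovers the driver's private struct mipid_device (via to_mipid_device()) and then goes through a single low-level mipid_transfer() routine, with thin mipid_cmd()/mipid_write()/mipid_read() wrappers. The following is a self-contained, illustrative C sketch of that wrapper pattern only, not the kernel driver: the SPI plumbing is stubbed with printf(), and every *_sketch name and field is an assumption.

/* sketch of the "one transfer routine + thin wrappers" pattern */
#include <stdio.h>
#include <stddef.h>

struct mipid_device_sketch {
	const char *name;	/* stands in for md->spi / panel state */
};

static void mipid_transfer_sketch(struct mipid_device_sketch *md, int cmd,
				  const unsigned char *wbuf, int wlen,
				  unsigned char *rbuf, int rlen)
{
	/* the real driver builds an spi_message and calls spi_sync(md->spi, &m) */
	printf("%s: cmd 0x%02x, write %d byte(s), read %d byte(s)\n",
	       md->name, cmd, wlen, rlen);
	if (rbuf && rlen > 0)
		rbuf[0] = 0x5a;	/* fake response byte */
}

/* thin wrappers, mirroring mipid_cmd()/mipid_write()/mipid_read() */
static void mipid_cmd_sketch(struct mipid_device_sketch *md, int cmd)
{
	mipid_transfer_sketch(md, cmd, NULL, 0, NULL, 0);
}

static void mipid_write_sketch(struct mipid_device_sketch *md, int reg,
			       const unsigned char *buf, int len)
{
	mipid_transfer_sketch(md, reg, buf, len, NULL, 0);
}

static void mipid_read_sketch(struct mipid_device_sketch *md, int reg,
			      unsigned char *buf, int len)
{
	mipid_transfer_sketch(md, reg, NULL, 0, buf, len);
}

int main(void)
{
	struct mipid_device_sketch md = { .name = "mipid0" };
	unsigned char id[3];

	mipid_cmd_sketch(&md, 0x01);		/* e.g. a reset-style command */
	mipid_read_sketch(&md, 0xda, id, 1);	/* e.g. read an ID register */
	return 0;
}
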
/kernel/linux/linux-5.10/drivers/video/fbdev/omap/
lcd_mipid.c
54 static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf, in mipid_transfer() argument
62 BUG_ON(md->spi == NULL); in mipid_transfer()
103 r = spi_sync(md->spi, &m); in mipid_transfer()
105 dev_dbg(&md->spi->dev, "spi_sync %d\n", r); in mipid_transfer()
111 static inline void mipid_cmd(struct mipid_device *md, int cmd) in mipid_cmd() argument
113 mipid_transfer(md, cmd, NULL, 0, NULL, 0); in mipid_cmd()
116 static inline void mipid_write(struct mipid_device *md, in mipid_write() argument
119 mipid_transfer(md, reg, buf, len, NULL, 0); in mipid_write()
122 static inline void mipid_read(struct mipid_device *md, in mipid_read() argument
125 mipid_transfer(md, re in mipid_read()
128 set_data_lines(struct mipid_device *md, int data_lines) set_data_lines() argument
146 send_init_string(struct mipid_device *md) send_init_string() argument
154 hw_guard_start(struct mipid_device *md, int guard_msec) hw_guard_start() argument
160 hw_guard_wait(struct mipid_device *md) hw_guard_wait() argument
170 set_sleep_mode(struct mipid_device *md, int on) set_sleep_mode() argument
192 set_display_state(struct mipid_device *md, int enabled) set_display_state() argument
201 struct mipid_device *md = to_mipid_device(panel); mipid_set_bklight_level() local
219 struct mipid_device *md = to_mipid_device(panel); mipid_get_bklight_level() local
229 struct mipid_device *md = to_mipid_device(panel); mipid_get_bklight_max() local
243 read_first_pixel(struct mipid_device *md) read_first_pixel() argument
273 struct mipid_device *md = to_mipid_device(panel); mipid_run_test() local
310 ls041y3_esd_recover(struct mipid_device *md) ls041y3_esd_recover() argument
317 ls041y3_esd_check_mode1(struct mipid_device *md) ls041y3_esd_check_mode1() argument
333 ls041y3_esd_check_mode2(struct mipid_device *md) ls041y3_esd_check_mode2() argument
369 ls041y3_esd_check(struct mipid_device *md) ls041y3_esd_check() argument
376 mipid_esd_start_check(struct mipid_device *md) mipid_esd_start_check() argument
383 mipid_esd_stop_check(struct mipid_device *md) mipid_esd_stop_check() argument
391 struct mipid_device *md = container_of(work, struct mipid_device, mipid_esd_work() local
402 struct mipid_device *md = to_mipid_device(panel); mipid_enable() local
423 struct mipid_device *md = to_mipid_device(panel); mipid_disable() local
445 panel_enabled(struct mipid_device *md) panel_enabled() argument
462 struct mipid_device *md = to_mipid_device(panel); mipid_init() local
480 struct mipid_device *md = to_mipid_device(panel); mipid_cleanup() local
511 mipid_detect(struct mipid_device *md) mipid_detect() argument
550 struct mipid_device *md; mipid_spi_probe() local
579 struct mipid_device *md = dev_get_drvdata(&spi->dev); mipid_spi_remove() local
[all...]
/kernel/linux/linux-5.10/drivers/md/
dm.c
97 struct mapped_device *md; member
136 * Bits for the md->flags field.
337 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
339 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
344 struct mapped_device *md; in dm_blk_open() local
348 md = bdev->bd_disk->private_data; in dm_blk_open()
349 if (!md) in dm_blk_open()
352 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
353 dm_deleting_md(md)) { in dm_blk_open()
354 md in dm_blk_open()
368 struct mapped_device *md; dm_blk_close() local
385 dm_open_count(struct mapped_device *md) dm_open_count() argument
393 dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) dm_lock_for_deletion() argument
413 dm_cancel_deferred_remove(struct mapped_device *md) dm_cancel_deferred_remove() argument
436 struct mapped_device *md = bdev->bd_disk->private_data; dm_blk_getgeo() local
475 struct mapped_device *md = disk->private_data; dm_blk_report_zones() local
519 dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, struct block_device **bdev) dm_prepare_ioctl() argument
553 dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) dm_unprepare_ioctl() argument
561 struct mapped_device *md = bdev->bd_disk->private_data; dm_blk_ioctl() local
599 struct mapped_device *md = io->md; start_io_acct() local
609 end_io_acct(struct mapped_device *md, struct bio *bio, unsigned long start_time, struct dm_stats_aux *stats_aux) end_io_acct() argument
628 alloc_io(struct mapped_device *md, struct bio *bio) alloc_io() argument
655 free_io(struct mapped_device *md, struct dm_io *io) free_io() argument
695 queue_io(struct mapped_device *md, struct bio *bio) queue_io() argument
722 dm_sync_table(struct mapped_device *md) dm_sync_table() argument
748 open_table_device(struct table_device *td, dev_t dev, struct mapped_device *md) open_table_device() argument
775 close_table_device(struct table_device *td, struct mapped_device *md) close_table_device() argument
799 dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, struct dm_dev **result) dm_get_table_device() argument
837 dm_put_table_device(struct mapped_device *md, struct dm_dev *d) dm_put_table_device() argument
867 dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) dm_get_geometry() argument
877 dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) dm_set_geometry() argument
891 __noflush_suspending(struct mapped_device *md) __noflush_suspending() argument
905 struct mapped_device *md = io->md; dec_pending() local
958 disable_discard(struct mapped_device *md) disable_discard() argument
967 disable_write_same(struct mapped_device *md) disable_write_same() argument
975 disable_write_zeroes(struct mapped_device *md) disable_write_zeroes() argument
993 struct mapped_device *md = tio->io->md; clone_endio() local
1039 struct mapped_device *md = io->md; clone_endio() local
1116 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_direct_access() local
1143 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_supported() local
1163 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_copy_from_iter() local
1187 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_copy_to_iter() local
1211 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_zero_page_range() local
1280 __set_swap_bios_limit(struct mapped_device *md, int latch) __set_swap_bios_limit() argument
1316 struct mapped_device *md = io->md; __map_bio() local
1335 struct mapped_device *md = io->md; __map_bio() local
1343 struct mapped_device *md = io->md; __map_bio() local
1618 init_clone_info(struct clone_info *ci, struct mapped_device *md, struct dm_table *map, struct bio *bio) init_clone_info() argument
1632 __split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) __split_and_process_bio() argument
1693 struct mapped_device *md = bio->bi_disk->private_data; dm_submit_bio() local
1780 cleanup_mapped_device(struct mapped_device *md) cleanup_mapped_device() argument
1825 struct mapped_device *md; alloc_dev() local
1939 free_dev(struct mapped_device *md) free_dev() argument
1955 __bind_mempools(struct mapped_device *md, struct dm_table *t) __bind_mempools() argument
2004 struct mapped_device *md = (struct mapped_device *) context; event_callback() local
2020 __bind(struct mapped_device *md, struct dm_table *t, struct queue_limits *limits) __bind() argument
2073 __unbind(struct mapped_device *md) __unbind() argument
2093 struct mapped_device *md; dm_create() local
2113 dm_lock_md_type(struct mapped_device *md) dm_lock_md_type() argument
2118 dm_unlock_md_type(struct mapped_device *md) dm_unlock_md_type() argument
2123 dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) dm_set_md_type() argument
2129 dm_get_md_type(struct mapped_device *md) dm_get_md_type() argument
2134 dm_get_immutable_target_type(struct mapped_device *md) dm_get_immutable_target_type() argument
2143 dm_get_queue_limits(struct mapped_device *md) dm_get_queue_limits() argument
2153 dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) dm_setup_md_queue() argument
2189 struct mapped_device *md; dm_get_md() local
2211 dm_get_mdptr(struct mapped_device *md) dm_get_mdptr() argument
2216 dm_set_mdptr(struct mapped_device *md, void *ptr) dm_set_mdptr() argument
2221 dm_get(struct mapped_device *md) dm_get() argument
2227 dm_hold(struct mapped_device *md) dm_hold() argument
2240 dm_device_name(struct mapped_device *md) dm_device_name() argument
2246 __dm_destroy(struct mapped_device *md, bool wait) __dm_destroy() argument
2294 dm_destroy(struct mapped_device *md) dm_destroy() argument
2299 dm_destroy_immediate(struct mapped_device *md) dm_destroy_immediate() argument
2304 dm_put(struct mapped_device *md) dm_put() argument
2310 md_in_flight_bios(struct mapped_device *md) md_in_flight_bios() argument
2324 dm_wait_for_bios_completion(struct mapped_device *md, long task_state) dm_wait_for_bios_completion() argument
2349 dm_wait_for_completion(struct mapped_device *md, long task_state) dm_wait_for_completion() argument
2376 struct mapped_device *md = container_of(work, struct mapped_device, work); dm_wq_work() local
2392 dm_queue_flush(struct mapped_device *md) dm_queue_flush() argument
2402 dm_swap_table(struct mapped_device *md, struct dm_table *table) dm_swap_table() argument
2447 lock_fs(struct mapped_device *md) lock_fs() argument
2465 unlock_fs(struct mapped_device *md) unlock_fs() argument
2484 __dm_suspend(struct mapped_device *md, struct dm_table *map, unsigned suspend_flags, long task_state, int dmf_suspended_flag) __dm_suspend() argument
2592 dm_suspend(struct mapped_device *md, unsigned suspend_flags) dm_suspend() argument
2629 __dm_resume(struct mapped_device *md, struct dm_table *map) __dm_resume() argument
2652 dm_resume(struct mapped_device *md) dm_resume() argument
2694 __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) __dm_internal_suspend() argument
2724 __dm_internal_resume(struct mapped_device *md) __dm_internal_resume() argument
2746 dm_internal_suspend_noflush(struct mapped_device *md) dm_internal_suspend_noflush() argument
2754 dm_internal_resume(struct mapped_device *md) dm_internal_resume() argument
2767 dm_internal_suspend_fast(struct mapped_device *md) dm_internal_suspend_fast() argument
2780 dm_internal_resume_fast(struct mapped_device *md) dm_internal_resume_fast() argument
2795 dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) dm_kobject_uevent() argument
2819 dm_next_uevent_seq(struct mapped_device *md) dm_next_uevent_seq() argument
2824 dm_get_event_nr(struct mapped_device *md) dm_get_event_nr() argument
2829 dm_wait_event(struct mapped_device *md, int event_nr) dm_wait_event() argument
2835 dm_uevent_add(struct mapped_device *md, struct list_head *elist) dm_uevent_add() argument
2848 dm_disk(struct mapped_device *md) dm_disk() argument
2854 dm_kobject(struct mapped_device *md) dm_kobject() argument
2861 struct mapped_device *md; dm_get_from_kobject() local
2877 dm_suspended_md(struct mapped_device *md) dm_suspended_md() argument
2882 dm_post_suspending_md(struct mapped_device *md) dm_post_suspending_md() argument
2887 dm_suspended_internally_md(struct mapped_device *md) dm_suspended_internally_md() argument
2892 dm_test_deferred_remove_flag(struct mapped_device *md) dm_test_deferred_remove_flag() argument
2915 dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, unsigned integrity, unsigned per_io_data_size, unsigned min_pool_size) dm_alloc_md_mempools() argument
2984 struct mapped_device *md = bdev->bd_disk->private_data; dm_call_pr() local
3054 struct mapped_device *md = bdev->bd_disk->private_data; dm_pr_reserve() local
3074 struct mapped_device *md = bdev->bd_disk->private_data; dm_pr_release() local
3095 struct mapped_device *md = bdev->bd_disk->private_data; dm_pr_preempt() local
3115 struct mapped_device *md = bdev->bd_disk->private_data; dm_pr_clear() local
[all...]
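
The dm.c hits show a recurring open-path idiom: dm_blk_open() pulls the mapped_device out of the disk's private_data, bails out if the device is flagged as freeing or deleting, and only then takes a reference. Below is a minimal, self-contained C sketch of that check-then-reference pattern, not the real dm.c: the flag values, the plain int refcount, and all *_sketch names are assumptions.

/* sketch of "recover md from private_data, refuse if freeing/deleting, then take a ref" */
#include <stdio.h>

#define DMF_FREEING_SKETCH   (1u << 0)
#define DMF_DELETING_SKETCH  (1u << 1)

struct mapped_device_sketch {
	unsigned int flags;
	int open_count;
};

struct gendisk_sketch {
	void *private_data;	/* points at the mapped_device, as in dm.c */
};

static struct mapped_device_sketch *dm_blk_open_sketch(struct gendisk_sketch *disk)
{
	struct mapped_device_sketch *md = disk->private_data;

	if (!md)
		return NULL;
	/* mirrors: test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md) */
	if (md->flags & (DMF_FREEING_SKETCH | DMF_DELETING_SKETCH))
		return NULL;
	md->open_count++;	/* stands in for dm_get() + atomic_inc(&md->open_count) */
	return md;
}

int main(void)
{
	struct mapped_device_sketch md = { .flags = 0, .open_count = 0 };
	struct gendisk_sketch disk = { .private_data = &md };

	if (dm_blk_open_sketch(&disk))
		printf("open ok, open_count=%d\n", md.open_count);

	md.flags |= DMF_DELETING_SKETCH;
	if (!dm_blk_open_sketch(&disk))
		printf("open rejected while deleting\n");
	return 0;
}
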
dm-era-target.c
34 struct writeset_metadata md; member
94 ws->md.nr_bits = nr_blocks; in writeset_init()
95 r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); in writeset_init()
139 r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); in writeset_test_and_set()
301 static int superblock_read_lock(struct era_metadata *md, in superblock_read_lock() argument
304 return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION, in superblock_read_lock()
308 static int superblock_lock_zero(struct era_metadata *md, in superblock_lock_zero() argument
311 return dm_bm_write_lock_zero(md in superblock_lock_zero()
315 superblock_lock(struct era_metadata *md, struct dm_block **sblock) superblock_lock() argument
368 struct era_metadata *md = context; ws_inc() local
380 struct era_metadata *md = context; ws_dec() local
397 setup_writeset_tree_info(struct era_metadata *md) setup_writeset_tree_info() argument
409 setup_era_array_info(struct era_metadata *md) setup_era_array_info() argument
422 setup_infos(struct era_metadata *md) setup_infos() argument
431 create_fresh_metadata(struct era_metadata *md) create_fresh_metadata() argument
465 save_sm_root(struct era_metadata *md) save_sm_root() argument
478 copy_sm_root(struct era_metadata *md, struct superblock_disk *disk) copy_sm_root() argument
490 prepare_superblock(struct era_metadata *md, struct superblock_disk *disk) prepare_superblock() argument
512 write_superblock(struct era_metadata *md) write_superblock() argument
537 format_metadata(struct era_metadata *md) format_metadata() argument
555 open_metadata(struct era_metadata *md) open_metadata() argument
606 open_or_format_metadata(struct era_metadata *md, bool may_format) open_or_format_metadata() argument
622 create_persistent_data_objects(struct era_metadata *md, bool may_format) create_persistent_data_objects() argument
641 destroy_persistent_data_objects(struct era_metadata *md) destroy_persistent_data_objects() argument
651 swap_writeset(struct era_metadata *md, struct writeset *new_writeset) swap_writeset() argument
677 metadata_digest_remove_writeset(struct era_metadata *md, struct digest *d) metadata_digest_remove_writeset() argument
696 metadata_digest_transcribe_writeset(struct era_metadata *md, struct digest *d) metadata_digest_transcribe_writeset() argument
730 metadata_digest_lookup_writeset(struct era_metadata *md, struct digest *d) metadata_digest_lookup_writeset() argument
772 metadata_digest_start(struct era_metadata *md, struct digest *d) metadata_digest_start() argument
792 struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL); metadata_open() local
813 metadata_close(struct era_metadata *md) metadata_close() argument
830 metadata_resize(struct era_metadata *md, void *arg) metadata_resize() argument
874 metadata_era_archive(struct era_metadata *md) metadata_era_archive() argument
905 next_writeset(struct era_metadata *md) next_writeset() argument
911 metadata_new_era(struct era_metadata *md) metadata_new_era() argument
928 metadata_era_rollover(struct era_metadata *md) metadata_era_rollover() argument
951 metadata_current_marked(struct era_metadata *md, dm_block_t block) metadata_current_marked() argument
964 metadata_commit(struct era_metadata *md) metadata_commit() argument
1001 metadata_checkpoint(struct era_metadata *md) metadata_checkpoint() argument
1013 metadata_take_snap(struct era_metadata *md) metadata_take_snap() argument
1072 metadata_drop_snap(struct era_metadata *md) metadata_drop_snap() argument
1125 metadata_get_stats(struct era_metadata *md, void *ptr) metadata_get_stats() argument
1162 struct era_metadata *md; global() member
1443 struct era_metadata *md; era_ctr() local
[all...]
dm-rq.c
19 struct mapped_device *md; member
60 int dm_request_based(struct mapped_device *md) in dm_request_based() argument
62 return queue_is_mq(md->queue); in dm_request_based()
128 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats() argument
130 if (unlikely(dm_stats_used(&md->stats))) { in rq_end_stats()
133 dm_stats_account_io(&md->stats, rq_data_dir(orig), in rq_end_stats()
140 * Don't touch any member of the md after calling this function because
141 * the md may be freed in dm_put() at the end of this function.
144 static void rq_completed(struct mapped_device *md) in rq_completed() argument
149 dm_put(md); in rq_completed()
160 struct mapped_device *md = tio->md; dm_end_request() local
176 dm_mq_kick_requeue_list(struct mapped_device *md) dm_mq_kick_requeue_list() argument
190 struct mapped_device *md = tio->md; dm_requeue_original_request() local
261 struct mapped_device *md = tio->md; dm_softirq_done() local
354 init_tio(struct dm_rq_target_io *tio, struct request *rq, struct mapped_device *md) init_tio() argument
382 struct mapped_device *md = tio->md; map_request() local
431 dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) dm_attr_rq_based_seq_io_merge_deadline_show() argument
436 dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, const char *buf, size_t count) dm_attr_rq_based_seq_io_merge_deadline_store() argument
442 dm_start_request(struct mapped_device *md, struct request *orig) dm_start_request() argument
468 struct mapped_device *md = set->driver_data; dm_mq_init_request() local
490 struct mapped_device *md = tio->md; dm_mq_queue_rq() local
536 dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) dm_mq_init_request_queue() argument
582 dm_mq_cleanup_mapped_device(struct mapped_device *md) dm_mq_cleanup_mapped_device() argument
[all...]
dm.h
79 void dm_lock_md_type(struct mapped_device *md);
80 void dm_unlock_md_type(struct mapped_device *md);
81 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
82 enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
83 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
85 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
118 int dm_deleting_md(struct mapped_device *md);
123 int dm_suspended_md(struct mapped_device *md);
128 int dm_suspended_internally_md(struct mapped_device *md);
129 void dm_internal_suspend_fast(struct mapped_device *md);
[all...]
dm-ioctl.c
45 struct mapped_device *md; member
120 dm_get(hc->md); in __get_name_cell()
134 dm_get(hc->md); in __get_uuid_cell()
143 struct mapped_device *md; in __get_dev_cell() local
146 md = dm_get_md(huge_decode_dev(dev)); in __get_dev_cell()
147 if (!md) in __get_dev_cell()
150 hc = dm_get_mdptr(md); in __get_dev_cell()
152 dm_put(md); in __get_dev_cell()
163 struct mapped_device *md) in alloc_cell()
191 hc->md in alloc_cell()
162 alloc_cell(const char *name, const char *uuid, struct mapped_device *md) alloc_cell() argument
209 dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) dm_hash_insert() argument
285 struct mapped_device *md; dm_hash_remove_all() local
376 struct mapped_device *md; dm_hash_rename() local
683 dm_get_inactive_table(struct mapped_device *md, int *srcu_idx) dm_get_inactive_table() argument
706 dm_get_live_or_inactive_table(struct mapped_device *md, struct dm_ioctl *param, int *srcu_idx) dm_get_live_or_inactive_table() argument
718 __dev_status(struct mapped_device *md, struct dm_ioctl *param) __dev_status() argument
775 struct mapped_device *md; dev_create() local
853 struct mapped_device *md = NULL; find_device() local
867 struct mapped_device *md; dev_remove() local
933 struct mapped_device *md; dev_rename() local
962 struct mapped_device *md; dev_set_geometry() local
1010 struct mapped_device *md; do_suspend() local
1040 struct mapped_device *md; do_resume() local
1122 struct mapped_device *md; dev_status() local
1214 struct mapped_device *md; dev_wait() local
1335 struct mapped_device *md; table_load() local
1421 struct mapped_device *md; table_clear() local
1495 struct mapped_device *md; table_deps() local
1521 struct mapped_device *md; table_status() local
1547 message_for_md(struct mapped_device *md, unsigned argc, char **argv, char *result, unsigned maxlen) message_for_md() argument
1578 struct mapped_device *md; target_message() local
2027 dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) dm_copy_name_and_uuid() argument
2077 struct mapped_device *md; dm_early_create() local
[all...]
/kernel/linux/linux-6.6/drivers/md/
dm.c
308 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
310 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
315 struct mapped_device *md; in dm_blk_open() local
319 md = disk->private_data; in dm_blk_open()
320 if (!md) in dm_blk_open()
323 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
324 dm_deleting_md(md)) { in dm_blk_open()
325 md = NULL; in dm_blk_open()
329 dm_get(md); in dm_blk_open()
330 atomic_inc(&md in dm_blk_open()
339 struct mapped_device *md; dm_blk_close() local
356 dm_open_count(struct mapped_device *md) dm_open_count() argument
364 dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) dm_lock_for_deletion() argument
384 dm_cancel_deferred_remove(struct mapped_device *md) dm_cancel_deferred_remove() argument
407 struct mapped_device *md = bdev->bd_disk->private_data; dm_blk_getgeo() local
412 dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, struct block_device **bdev) dm_prepare_ioctl() argument
446 dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) dm_unprepare_ioctl() argument
454 struct mapped_device *md = bdev->bd_disk->private_data; dm_blk_ioctl() local
573 alloc_io(struct mapped_device *md, struct bio *bio) alloc_io() argument
615 struct mapped_device *md = ci->io->md; alloc_tio() local
668 queue_io(struct mapped_device *md, struct bio *bio) queue_io() argument
697 dm_sync_table(struct mapped_device *md) dm_sync_table() argument
723 open_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode) open_table_device() argument
770 close_table_device(struct table_device *td, struct mapped_device *md) close_table_device() argument
792 dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, struct dm_dev **result) dm_get_table_device() argument
814 dm_put_table_device(struct mapped_device *md, struct dm_dev *d) dm_put_table_device() argument
827 dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) dm_get_geometry() argument
837 dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) dm_set_geometry() argument
851 __noflush_suspending(struct mapped_device *md) __noflush_suspending() argument
858 struct mapped_device *md = io->md; dm_requeue_add_io() local
870 dm_kick_requeue(struct mapped_device *md, bool first_stage) dm_kick_requeue() argument
888 struct mapped_device *md = io->md; dm_handle_requeue() local
932 struct mapped_device *md = io->md; __dm_io_complete() local
980 struct mapped_device *md = container_of(work, struct mapped_device, dm_wq_requeue_work() local
1066 dm_get_queue_limits(struct mapped_device *md) dm_get_queue_limits() argument
1071 disable_discard(struct mapped_device *md) disable_discard() argument
1079 disable_write_zeroes(struct mapped_device *md) disable_write_zeroes() argument
1099 struct mapped_device *md = io->md; clone_endio() local
1223 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_direct_access() local
1250 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_zero_page_range() local
1277 struct mapped_device *md = dax_get_private(dax_dev); dm_dax_recovery_write() local
1378 __set_swap_bios_limit(struct mapped_device *md, int latch) __set_swap_bios_limit() argument
1399 struct mapped_device *md = io->md; __map_bio() local
1734 init_clone_info(struct clone_info *ci, struct mapped_device *md, struct dm_table *map, struct bio *bio, bool is_abnormal) init_clone_info() argument
1754 dm_split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) dm_split_and_process_bio() argument
1815 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; dm_submit_bio() local
1970 cleanup_mapped_device(struct mapped_device *md) cleanup_mapped_device() argument
2030 struct mapped_device *md; alloc_dev() local
2146 free_dev(struct mapped_device *md) free_dev() argument
2169 struct mapped_device *md = context; event_callback() local
2185 __bind(struct mapped_device *md, struct dm_table *t, struct queue_limits *limits) __bind() argument
2255 __unbind(struct mapped_device *md) __unbind() argument
2274 struct mapped_device *md; dm_create() local
2290 dm_lock_md_type(struct mapped_device *md) dm_lock_md_type() argument
2295 dm_unlock_md_type(struct mapped_device *md) dm_unlock_md_type() argument
2300 dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) dm_set_md_type() argument
2306 dm_get_md_type(struct mapped_device *md) dm_get_md_type() argument
2311 dm_get_immutable_target_type(struct mapped_device *md) dm_get_immutable_target_type() argument
2319 dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) dm_setup_md_queue() argument
2391 struct mapped_device *md; dm_get_md() local
2413 dm_get_mdptr(struct mapped_device *md) dm_get_mdptr() argument
2418 dm_set_mdptr(struct mapped_device *md, void *ptr) dm_set_mdptr() argument
2423 dm_get(struct mapped_device *md) dm_get() argument
2429 dm_hold(struct mapped_device *md) dm_hold() argument
2442 dm_device_name(struct mapped_device *md) dm_device_name() argument
2448 __dm_destroy(struct mapped_device *md, bool wait) __dm_destroy() argument
2495 dm_destroy(struct mapped_device *md) dm_destroy() argument
2500 dm_destroy_immediate(struct mapped_device *md) dm_destroy_immediate() argument
2505 dm_put(struct mapped_device *md) dm_put() argument
2511 dm_in_flight_bios(struct mapped_device *md) dm_in_flight_bios() argument
2522 dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) dm_wait_for_bios_completion() argument
2547 dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) dm_wait_for_completion() argument
2574 struct mapped_device *md = container_of(work, struct mapped_device, work); dm_wq_work() local
2590 dm_queue_flush(struct mapped_device *md) dm_queue_flush() argument
2600 dm_swap_table(struct mapped_device *md, struct dm_table *table) dm_swap_table() argument
2645 lock_fs(struct mapped_device *md) lock_fs() argument
2657 unlock_fs(struct mapped_device *md) unlock_fs() argument
2674 __dm_suspend(struct mapped_device *md, struct dm_table *map, unsigned int suspend_flags, unsigned int task_state, int dmf_suspended_flag) __dm_suspend() argument
2782 dm_suspend(struct mapped_device *md, unsigned int suspend_flags) dm_suspend() argument
2823 __dm_resume(struct mapped_device *md, struct dm_table *map) __dm_resume() argument
2847 dm_resume(struct mapped_device *md) dm_resume() argument
2889 __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags) __dm_internal_suspend() argument
2919 __dm_internal_resume(struct mapped_device *md) __dm_internal_resume() argument
2955 dm_internal_suspend_noflush(struct mapped_device *md) dm_internal_suspend_noflush() argument
2963 dm_internal_resume(struct mapped_device *md) dm_internal_resume() argument
2976 dm_internal_suspend_fast(struct mapped_device *md) dm_internal_suspend_fast() argument
2989 dm_internal_resume_fast(struct mapped_device *md) dm_internal_resume_fast() argument
3006 dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned int cookie, bool need_resize_uevent) dm_kobject_uevent() argument
3032 dm_next_uevent_seq(struct mapped_device *md) dm_next_uevent_seq() argument
3037 dm_get_event_nr(struct mapped_device *md) dm_get_event_nr() argument
3042 dm_wait_event(struct mapped_device *md, int event_nr) dm_wait_event() argument
3048 dm_uevent_add(struct mapped_device *md, struct list_head *elist) dm_uevent_add() argument
3061 dm_disk(struct mapped_device *md) dm_disk() argument
3067 dm_kobject(struct mapped_device *md) dm_kobject() argument
3074 struct mapped_device *md; dm_get_from_kobject() local
3090 dm_suspended_md(struct mapped_device *md) dm_suspended_md() argument
3095 dm_post_suspending_md(struct mapped_device *md) dm_post_suspending_md() argument
3100 dm_suspended_internally_md(struct mapped_device *md) dm_suspended_internally_md() argument
3105 dm_test_deferred_remove_flag(struct mapped_device *md) dm_test_deferred_remove_flag() argument
3154 struct mapped_device *md = bdev->bd_disk->private_data; dm_call_pr() local
3363 struct mapped_device *md = bdev->bd_disk->private_data; dm_pr_clear() local
[all...]
dm-era-target.c
36 struct writeset_metadata md; member
96 ws->md.nr_bits = nr_blocks; in writeset_init()
97 r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); in writeset_init()
142 r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); in writeset_test_and_set()
311 static int superblock_read_lock(struct era_metadata *md, in superblock_read_lock() argument
314 return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION, in superblock_read_lock()
318 static int superblock_lock_zero(struct era_metadata *md, in superblock_lock_zero() argument
321 return dm_bm_write_lock_zero(md in superblock_lock_zero()
325 superblock_lock(struct era_metadata *md, struct dm_block **sblock) superblock_lock() argument
378 struct era_metadata *md = context; ws_inc() local
392 struct era_metadata *md = context; ws_dec() local
411 setup_writeset_tree_info(struct era_metadata *md) setup_writeset_tree_info() argument
424 setup_era_array_info(struct era_metadata *md) setup_era_array_info() argument
437 setup_infos(struct era_metadata *md) setup_infos() argument
446 create_fresh_metadata(struct era_metadata *md) create_fresh_metadata() argument
480 save_sm_root(struct era_metadata *md) save_sm_root() argument
493 copy_sm_root(struct era_metadata *md, struct superblock_disk *disk) copy_sm_root() argument
505 prepare_superblock(struct era_metadata *md, struct superblock_disk *disk) prepare_superblock() argument
527 write_superblock(struct era_metadata *md) write_superblock() argument
552 format_metadata(struct era_metadata *md) format_metadata() argument
570 open_metadata(struct era_metadata *md) open_metadata() argument
621 open_or_format_metadata(struct era_metadata *md, bool may_format) open_or_format_metadata() argument
637 create_persistent_data_objects(struct era_metadata *md, bool may_format) create_persistent_data_objects() argument
656 destroy_persistent_data_objects(struct era_metadata *md) destroy_persistent_data_objects() argument
666 swap_writeset(struct era_metadata *md, struct writeset *new_writeset) swap_writeset() argument
694 metadata_digest_remove_writeset(struct era_metadata *md, struct digest *d) metadata_digest_remove_writeset() argument
713 metadata_digest_transcribe_writeset(struct era_metadata *md, struct digest *d) metadata_digest_transcribe_writeset() argument
747 metadata_digest_lookup_writeset(struct era_metadata *md, struct digest *d) metadata_digest_lookup_writeset() argument
789 metadata_digest_start(struct era_metadata *md, struct digest *d) metadata_digest_start() argument
811 struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL); metadata_open() local
832 metadata_close(struct era_metadata *md) metadata_close() argument
849 metadata_resize(struct era_metadata *md, void *arg) metadata_resize() argument
893 metadata_era_archive(struct era_metadata *md) metadata_era_archive() argument
924 next_writeset(struct era_metadata *md) next_writeset() argument
930 metadata_new_era(struct era_metadata *md) metadata_new_era() argument
947 metadata_era_rollover(struct era_metadata *md) metadata_era_rollover() argument
970 metadata_current_marked(struct era_metadata *md, dm_block_t block) metadata_current_marked() argument
983 metadata_commit(struct era_metadata *md) metadata_commit() argument
1020 metadata_checkpoint(struct era_metadata *md) metadata_checkpoint() argument
1032 metadata_take_snap(struct era_metadata *md) metadata_take_snap() argument
1091 metadata_drop_snap(struct era_metadata *md) metadata_drop_snap() argument
1144 metadata_get_stats(struct era_metadata *md, void *ptr) metadata_get_stats() argument
1181 struct era_metadata *md; global() member
1394 in_worker0(struct era *era, int (*fn)(struct era_metadata *md)) in_worker0() argument
1404 in_worker1(struct era *era, int (*fn)(struct era_metadata *md, void *ref), void *arg) in_worker1() argument
1470 struct era_metadata *md; era_ctr() local
[all...]
dm-ima.c
67 static int dm_ima_alloc_and_copy_name_uuid(struct mapped_device *md, char **dev_name, in dm_ima_alloc_and_copy_name_uuid() argument
83 r = dm_copy_name_and_uuid(md, *dev_name, *dev_uuid); in dm_ima_alloc_and_copy_name_uuid()
102 static int dm_ima_alloc_and_copy_device_data(struct mapped_device *md, char **device_data, in dm_ima_alloc_and_copy_device_data() argument
108 r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio); in dm_ima_alloc_and_copy_device_data()
120 dev_name, dev_uuid, md->disk->major, md->disk->first_minor, in dm_ima_alloc_and_copy_device_data()
121 md->disk->minors, num_targets); in dm_ima_alloc_and_copy_device_data()
149 static int dm_ima_alloc_and_copy_capacity_str(struct mapped_device *md, char **capacity_str, in dm_ima_alloc_and_copy_capacity_str() argument
154 capacity = get_capacity(md->disk); in dm_ima_alloc_and_copy_capacity_str()
169 void dm_ima_reset_data(struct mapped_device *md) in dm_ima_reset_data() argument
369 dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap) dm_ima_measure_on_device_resume() argument
474 dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all) dm_ima_measure_on_device_remove() argument
605 dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map) dm_ima_measure_on_table_clear() argument
704 dm_ima_measure_on_device_rename(struct mapped_device *md) dm_ima_measure_on_device_rename() argument
[all...]
dm-rq.c
19 struct mapped_device *md; member
59 int dm_request_based(struct mapped_device *md) in dm_request_based() argument
61 return queue_is_mq(md->queue); in dm_request_based()
127 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats() argument
129 if (unlikely(dm_stats_used(&md->stats))) { in rq_end_stats()
133 dm_stats_account_io(&md->stats, rq_data_dir(orig), in rq_end_stats()
140 * Don't touch any member of the md after calling this function because
141 * the md may be freed in dm_put() at the end of this function.
144 static void rq_completed(struct mapped_device *md) in rq_completed() argument
149 dm_put(md); in rq_completed()
160 struct mapped_device *md = tio->md; dm_end_request() local
176 dm_mq_kick_requeue_list(struct mapped_device *md) dm_mq_kick_requeue_list() argument
190 struct mapped_device *md = tio->md; dm_requeue_original_request() local
258 struct mapped_device *md = tio->md; dm_softirq_done() local
338 init_tio(struct dm_rq_target_io *tio, struct request *rq, struct mapped_device *md) init_tio() argument
366 struct mapped_device *md = tio->md; map_request() local
422 dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) dm_attr_rq_based_seq_io_merge_deadline_show() argument
427 dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, const char *buf, size_t count) dm_attr_rq_based_seq_io_merge_deadline_store() argument
433 dm_start_request(struct mapped_device *md, struct request *orig) dm_start_request() argument
460 struct mapped_device *md = set->driver_data; dm_mq_init_request() local
482 struct mapped_device *md = tio->md; dm_mq_queue_rq() local
536 dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) dm_mq_init_request_queue() argument
578 dm_mq_cleanup_mapped_device(struct mapped_device *md) dm_mq_cleanup_mapped_device() argument
[all...]
dm-zone.c
21 static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t, in dm_blk_do_report_zones() argument
25 struct gendisk *disk = md->disk; in dm_blk_do_report_zones()
59 struct mapped_device *md = disk->private_data; in dm_blk_report_zones() local
63 if (dm_suspended_md(md)) in dm_blk_report_zones()
66 map = dm_get_live_table(md, &srcu_idx); in dm_blk_report_zones()
70 ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data); in dm_blk_report_zones()
72 dm_put_live_table(md, srcu_idx); in dm_blk_report_zones()
125 bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) in dm_is_zone_write() argument
127 struct request_queue *q = md->queue; in dm_is_zone_write()
141 void dm_cleanup_zoned_dev(struct mapped_device *md) in dm_cleanup_zoned_dev() argument
180 struct mapped_device *md = data; dm_zone_revalidate_cb() local
226 dm_revalidate_zones(struct mapped_device *md, struct dm_table *t) dm_revalidate_zones() argument
291 struct mapped_device *md = t->md; dm_set_zones_restrictions() local
328 dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno, unsigned int *wp_ofst) dm_update_zone_wp_offset() argument
367 dm_zone_map_bio_begin(struct mapped_device *md, unsigned int zno, struct bio *clone) dm_zone_map_bio_begin() argument
423 dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno, struct orig_bio_details *orig_bio_details, unsigned int nr_sectors) dm_zone_map_bio_end() argument
516 struct mapped_device *md = io->md; dm_zone_map_bio() local
587 struct mapped_device *md = io->md; dm_zone_endio() local
[all...]
dm-sysfs.c
15 ssize_t (*show)(struct mapped_device *md, char *p);
16 ssize_t (*store)(struct mapped_device *md, const char *p, size_t count);
27 struct mapped_device *md; in dm_attr_show() local
34 md = dm_get_from_kobject(kobj); in dm_attr_show()
35 if (!md) in dm_attr_show()
38 ret = dm_attr->show(md, page); in dm_attr_show()
39 dm_put(md); in dm_attr_show()
52 struct mapped_device *md; in dm_attr_store() local
59 md = dm_get_from_kobject(kobj); in dm_attr_store()
60 if (!md) in dm_attr_store()
69 dm_attr_name_show(struct mapped_device *md, char *buf) dm_attr_name_show() argument
78 dm_attr_uuid_show(struct mapped_device *md, char *buf) dm_attr_uuid_show() argument
87 dm_attr_suspended_show(struct mapped_device *md, char *buf) dm_attr_suspended_show() argument
94 dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf) dm_attr_use_blk_mq_show() argument
133 dm_sysfs_init(struct mapped_device *md) dm_sysfs_init() argument
143 dm_sysfs_exit(struct mapped_device *md) dm_sysfs_exit() argument
[all...]
/kernel/linux/linux-6.6/drivers/net/wwan/t7xx/
t7xx_modem_ops.c
82 struct t7xx_modem *md = t7xx_dev->md; in t7xx_pci_mhccif_isr() local
88 ctl = md->fsm_ctl; in t7xx_pci_mhccif_isr()
95 spin_lock_bh(&md->exp_lock); in t7xx_pci_mhccif_isr()
97 md->exp_id |= int_sta; in t7xx_pci_mhccif_isr()
98 if (md->exp_id & D2H_INT_EXCEPTION_INIT) { in t7xx_pci_mhccif_isr()
103 md->exp_id &= ~D2H_INT_EXCEPTION_INIT; in t7xx_pci_mhccif_isr()
106 } else if (md->exp_id & D2H_INT_PORT_ENUM) { in t7xx_pci_mhccif_isr()
107 md->exp_id &= ~D2H_INT_PORT_ENUM; in t7xx_pci_mhccif_isr()
114 if ((md in t7xx_pci_mhccif_isr()
291 t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) t7xx_md_exception() argument
310 t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id) t7xx_wait_hif_ex_hk_event() argument
438 t7xx_core_reset(struct t7xx_modem *md) t7xx_core_reset() argument
461 t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info, struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, enum t7xx_fsm_event_state err_detect) t7xx_core_hk_handler() argument
527 struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); t7xx_md_hk_wq() local
541 struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work); t7xx_ap_hk_wq() local
553 t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) t7xx_md_event_notify() argument
615 t7xx_md_exception_handshake(struct t7xx_modem *md) t7xx_md_exception_handshake() argument
641 struct t7xx_modem *md; t7xx_md_alloc() local
670 struct t7xx_modem *md = t7xx_dev->md; t7xx_md_reset() local
695 struct t7xx_modem *md; t7xx_md_init() local
761 struct t7xx_modem *md = t7xx_dev->md; t7xx_md_exit() local
[all...]
/kernel/linux/linux-6.6/drivers/net/mdio/
mdio-mux-bcm-iproc.c
57 static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md) in mdio_mux_iproc_config() argument
63 val = readl(md->base + MDIO_SCAN_CTRL_OFFSET); in mdio_mux_iproc_config()
65 writel(val, md->base + MDIO_SCAN_CTRL_OFFSET); in mdio_mux_iproc_config()
67 if (md->core_clk) { in mdio_mux_iproc_config()
71 divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY; in mdio_mux_iproc_config()
75 writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET); in mdio_mux_iproc_config()
76 writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET); in mdio_mux_iproc_config()
136 struct iproc_mdiomux_desc *md = bus->priv; in iproc_mdiomux_read_c22() local
139 ret = start_miim_ops(md->base, false, phyid, reg, 0, MDIO_CTRL_READ_OP); in iproc_mdiomux_read_c22()
149 struct iproc_mdiomux_desc *md in iproc_mdiomux_read_c45() local
163 struct iproc_mdiomux_desc *md = bus->priv; iproc_mdiomux_write_c22() local
178 struct iproc_mdiomux_desc *md = bus->priv; iproc_mdiomux_write_c45() local
193 struct iproc_mdiomux_desc *md = data; mdio_mux_iproc_switch_fn() local
210 struct iproc_mdiomux_desc *md; mdio_mux_iproc_probe() local
292 struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_iproc_remove() local
304 struct iproc_mdiomux_desc *md = dev_get_drvdata(dev); mdio_mux_iproc_suspend() local
313 struct iproc_mdiomux_desc *md = dev_get_drvdata(dev); mdio_mux_iproc_resume() local
[all...]
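
The mdio-mux-bcm-iproc.c hits show mdio_mux_iproc_config() doing two things with md: a read-modify-write of a control register at md->base, and deriving a clock divisor from the core clock rate and the MDIO operating frequency. The sketch below is a standalone, illustrative C version of just those two steps, not the driver: the register layout, the control bit, the 10 MHz figure, and the in-memory "registers" are all assumptions.

/* sketch of read-modify-write config plus clock-divisor derivation */
#include <stdio.h>
#include <stdint.h>

#define MDIO_OPERATING_FREQUENCY_SKETCH  10000000u	/* assumed target, Hz */
#define SCAN_CTRL_DISABLE_BIT            (1u << 3)	/* made-up bit */

struct iproc_mdiomux_desc_sketch {
	uint32_t regs[4];		/* stands in for the ioremapped md->base */
	unsigned long core_clk_rate;	/* clk_get_rate(md->core_clk) in the driver */
};

static void mdio_mux_config_sketch(struct iproc_mdiomux_desc_sketch *md)
{
	/* read-modify-write, as with readl()/writel() on md->base + offset */
	uint32_t val = md->regs[0];

	val |= SCAN_CTRL_DISABLE_BIT;
	md->regs[0] = val;

	if (md->core_clk_rate) {
		/* divisor = core clock rate / MDIO operating frequency */
		uint32_t divisor = md->core_clk_rate / MDIO_OPERATING_FREQUENCY_SKETCH;

		md->regs[1] = divisor;	/* rate-adjust registers in the real driver */
		md->regs[2] = divisor;
	}
}

int main(void)
{
	struct iproc_mdiomux_desc_sketch md = { .core_clk_rate = 250000000 };

	mdio_mux_config_sketch(&md);
	printf("scan ctrl=0x%x, divisor=%u\n", md.regs[0], md.regs[1]);
	return 0;
}
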
mdio-mux-bcm6368.c
39 struct bcm6368_mdiomux_desc *md = bus->priv; in bcm6368_mdiomux_read() local
43 __raw_writel(0, md->base + MDIOC_REG); in bcm6368_mdiomux_read()
48 if (md->ext_phy) in bcm6368_mdiomux_read()
51 __raw_writel(reg, md->base + MDIOC_REG); in bcm6368_mdiomux_read()
53 ret = __raw_readw(md->base + MDIOD_REG); in bcm6368_mdiomux_read()
61 struct bcm6368_mdiomux_desc *md = bus->priv; in bcm6368_mdiomux_write() local
64 __raw_writel(0, md->base + MDIOC_REG); in bcm6368_mdiomux_write()
69 if (md->ext_phy) in bcm6368_mdiomux_write()
73 __raw_writel(reg, md->base + MDIOC_REG); in bcm6368_mdiomux_write()
82 struct bcm6368_mdiomux_desc *md in bcm6368_mdiomux_switch_fn() local
91 struct bcm6368_mdiomux_desc *md; bcm6368_mdiomux_probe() local
158 struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev); bcm6368_mdiomux_remove() local
[all...]
/kernel/linux/linux-5.10/drivers/soundwire/
master.c
42 struct sdw_master_device *md = dev_to_sdw_master_device(dev); \
43 return sprintf(buf, format_string, md->bus->prop.field); \
59 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in clock_frequencies_show() local
63 for (i = 0; i < md->bus->prop.num_clk_freq; i++) in clock_frequencies_show()
65 md->bus->prop.clk_freq[i]); in clock_frequencies_show()
75 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in clock_gears_show() local
79 for (i = 0; i < md->bus->prop.num_clk_gears; i++) in clock_gears_show()
81 md->bus->prop.clk_gears[i]); in clock_gears_show()
105 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in sdw_master_device_release() local
107 kfree(md); in sdw_master_device_release()
130 struct sdw_master_device *md; sdw_master_device_add() local
[all...]
/kernel/linux/linux-6.6/drivers/soundwire/
master.c
42 struct sdw_master_device *md = dev_to_sdw_master_device(dev); \
43 return sprintf(buf, format_string, md->bus->prop.field); \
59 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in clock_frequencies_show() local
63 for (i = 0; i < md->bus->prop.num_clk_freq; i++) in clock_frequencies_show()
65 md->bus->prop.clk_freq[i]); in clock_frequencies_show()
75 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in clock_gears_show() local
79 for (i = 0; i < md->bus->prop.num_clk_gears; i++) in clock_gears_show()
81 md->bus->prop.clk_gears[i]); in clock_gears_show()
105 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in sdw_master_device_release() local
107 kfree(md); in sdw_master_device_release()
130 struct sdw_master_device *md; sdw_master_device_add() local
[all...]
/kernel/linux/linux-5.10/drivers/net/mdio/
mdio-mux-bcm-iproc.c
57 static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md) in mdio_mux_iproc_config() argument
63 val = readl(md->base + MDIO_SCAN_CTRL_OFFSET); in mdio_mux_iproc_config()
65 writel(val, md->base + MDIO_SCAN_CTRL_OFFSET); in mdio_mux_iproc_config()
67 if (md->core_clk) { in mdio_mux_iproc_config()
71 divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY; in mdio_mux_iproc_config()
75 writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET); in mdio_mux_iproc_config()
76 writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET); in mdio_mux_iproc_config()
136 struct iproc_mdiomux_desc *md = bus->priv; in iproc_mdiomux_read() local
139 ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP); in iproc_mdiomux_read()
149 struct iproc_mdiomux_desc *md in iproc_mdiomux_write() local
163 struct iproc_mdiomux_desc *md = data; mdio_mux_iproc_switch_fn() local
180 struct iproc_mdiomux_desc *md; mdio_mux_iproc_probe() local
263 struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_iproc_remove() local
275 struct iproc_mdiomux_desc *md = dev_get_drvdata(dev); mdio_mux_iproc_suspend() local
284 struct iproc_mdiomux_desc *md = dev_get_drvdata(dev); mdio_mux_iproc_resume() local
[all...]
/kernel/linux/linux-5.10/arch/ia64/kernel/
efi.c
267 is_memory_available (efi_memory_desc_t *md) in is_memory_available() argument
269 if (!(md->attribute & EFI_MEMORY_WB)) in is_memory_available()
272 switch (md->type) { in is_memory_available()
291 #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
300 efi_md_end(efi_memory_desc_t *md) in efi_md_end() argument
302 return (md->phys_addr + efi_md_size(md)); in efi_md_end()
306 efi_wb(efi_memory_desc_t *md) in efi_wb() argument
308 return (md in efi_wb()
312 efi_uc(efi_memory_desc_t *md) efi_uc() argument
364 efi_memory_desc_t *md; efi_get_pal_addr() local
559 efi_memory_desc_t *md; efi_init() local
604 efi_memory_desc_t *md; efi_enter_virtual_mode() local
689 efi_memory_desc_t *md; efi_get_iobase() local
709 struct kern_memdesc *md; kern_memory_descriptor() local
722 efi_memory_desc_t *md; efi_memory_descriptor() local
742 efi_memory_desc_t *md; efi_memmap_intersects() local
763 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_type() local
773 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_attributes() local
785 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_attribute() local
813 struct kern_memdesc *md; kern_mem_attribute() local
974 efi_memory_desc_t *md, *pmd = NULL, *check_md; find_memmap_space() local
1051 efi_memory_desc_t *md, *pmd = NULL, *check_md; efi_memmap_init() local
1175 efi_memory_desc_t *md; efi_initialize_iomem_resources() local
1286 efi_memory_desc_t *md; kdump_find_rsvd_region() local
1327 efi_memory_desc_t *md; vmcore_find_descriptor_size() local
[all...]
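
The ia64 efi.c hits include the small descriptor helpers themselves: efi_md_size(md) is num_pages << EFI_PAGE_SHIFT, efi_md_end(md) is phys_addr plus that size, and is_memory_available() first requires the EFI_MEMORY_WB attribute. Here is a self-contained C sketch of those helpers; the struct below is a trimmed stand-in for efi_memory_desc_t, and the attribute bit value is an assumption rather than a quote of the header.

/* sketch of the EFI memory-descriptor size/end/availability helpers */
#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SHIFT_SKETCH  12		/* 4 KiB EFI pages */
#define EFI_MEMORY_WB_SKETCH   (1ull << 3)	/* assumed write-back attribute bit */

struct efi_md_sketch {
	uint64_t phys_addr;
	uint64_t num_pages;
	uint64_t attribute;
};

static uint64_t efi_md_size_sketch(const struct efi_md_sketch *md)
{
	/* mirrors: #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT) */
	return md->num_pages << EFI_PAGE_SHIFT_SKETCH;
}

static uint64_t efi_md_end_sketch(const struct efi_md_sketch *md)
{
	/* mirrors: return md->phys_addr + efi_md_size(md); */
	return md->phys_addr + efi_md_size_sketch(md);
}

static int is_memory_available_sketch(const struct efi_md_sketch *md)
{
	/* mirrors: if (!(md->attribute & EFI_MEMORY_WB)) return 0; */
	return (md->attribute & EFI_MEMORY_WB_SKETCH) != 0;
}

int main(void)
{
	struct efi_md_sketch md = {
		.phys_addr = 0x100000, .num_pages = 16,
		.attribute = EFI_MEMORY_WB_SKETCH,
	};

	printf("end=0x%llx available=%d\n",
	       (unsigned long long)efi_md_end_sketch(&md),
	       is_memory_available_sketch(&md));
	return 0;
}
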
/kernel/linux/linux-6.6/arch/ia64/kernel/
efi.c
269 is_memory_available (efi_memory_desc_t *md) in is_memory_available() argument
271 if (!(md->attribute & EFI_MEMORY_WB)) in is_memory_available()
274 switch (md->type) { in is_memory_available()
293 #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
302 efi_md_end(efi_memory_desc_t *md) in efi_md_end() argument
304 return (md->phys_addr + efi_md_size(md)); in efi_md_end()
308 efi_wb(efi_memory_desc_t *md) in efi_wb() argument
310 return (md in efi_wb()
314 efi_uc(efi_memory_desc_t *md) efi_uc() argument
366 efi_memory_desc_t *md; efi_get_pal_addr() local
561 efi_memory_desc_t *md; efi_init() local
606 efi_memory_desc_t *md; efi_enter_virtual_mode() local
691 efi_memory_desc_t *md; efi_get_iobase() local
711 struct kern_memdesc *md; kern_memory_descriptor() local
724 efi_memory_desc_t *md; efi_memory_descriptor() local
744 efi_memory_desc_t *md; efi_memmap_intersects() local
765 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_type() local
775 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_attributes() local
787 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_attribute() local
815 struct kern_memdesc *md; kern_mem_attribute() local
976 efi_memory_desc_t *md, *pmd = NULL, *check_md; find_memmap_space() local
1053 efi_memory_desc_t *md, *pmd = NULL, *check_md; efi_memmap_init() local
1177 efi_memory_desc_t *md; efi_initialize_iomem_resources() local
1288 efi_memory_desc_t *md; kdump_find_rsvd_region() local
1329 efi_memory_desc_t *md; vmcore_find_descriptor_size() local
[all...]
/kernel/linux/linux-5.10/drivers/clk/qcom/
clk-regmap-mux-div.c
23 int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div) in mux_div_set_src_div() argument
27 const char *name = clk_hw_get_name(&md->clkr.hw); in mux_div_set_src_div()
29 val = (div << md->hid_shift) | (src << md->src_shift); in mux_div_set_src_div()
30 mask = ((BIT(md->hid_width) - 1) << md->hid_shift) | in mux_div_set_src_div()
31 ((BIT(md->src_width) - 1) << md->src_shift); in mux_div_set_src_div()
33 ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md in mux_div_set_src_div()
59 mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, u32 *div) mux_div_get_src_div() argument
91 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_determine_rate() local
127 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); __mux_div_set_rate_and_parent() local
165 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_get_parent() local
181 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_set_parent() local
189 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_set_rate() local
197 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_set_rate_and_parent() local
205 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_recalc_rate() local
[all...]
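
The clk-regmap-mux-div.c hits show mux_div_set_src_div() packing the divider and source-select values into a register: each value is shifted into its field, a mask is built from the field widths, and only the masked bits are updated (regmap_update_bits() in the real code). This standalone C sketch reproduces that field packing on a plain integer "register"; the shifts and widths in main() are arbitrary example values, not taken from any clock controller.

/* sketch of the (div << hid_shift) | (src << src_shift) field packing */
#include <stdio.h>
#include <stdint.h>

#define BIT_SKETCH(n)  (1u << (n))

struct mux_div_sketch {
	uint32_t reg;				/* stands in for the CFG_RCGR register */
	unsigned int hid_shift, hid_width;	/* divider field */
	unsigned int src_shift, src_width;	/* source-select field */
};

static void set_src_div_sketch(struct mux_div_sketch *md, uint32_t src, uint32_t div)
{
	uint32_t val  = (div << md->hid_shift) | (src << md->src_shift);
	uint32_t mask = ((BIT_SKETCH(md->hid_width) - 1) << md->hid_shift) |
			((BIT_SKETCH(md->src_width) - 1) << md->src_shift);

	/* read-modify-write limited to the two fields, like regmap_update_bits() */
	md->reg = (md->reg & ~mask) | (val & mask);
}

int main(void)
{
	struct mux_div_sketch md = {
		.reg = 0xffffffff,
		.hid_shift = 0, .hid_width = 5,
		.src_shift = 8, .src_width = 3,
	};

	set_src_div_sketch(&md, 2, 11);
	printf("CFG = 0x%08x\n", md.reg);
	return 0;
}
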
/kernel/linux/linux-6.6/drivers/clk/qcom/
clk-regmap-mux-div.c
23 int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div) in mux_div_set_src_div() argument
27 const char *name = clk_hw_get_name(&md->clkr.hw); in mux_div_set_src_div()
29 val = (div << md->hid_shift) | (src << md->src_shift); in mux_div_set_src_div()
30 mask = ((BIT(md->hid_width) - 1) << md->hid_shift) | in mux_div_set_src_div()
31 ((BIT(md->src_width) - 1) << md->src_shift); in mux_div_set_src_div()
33 ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md in mux_div_set_src_div()
59 mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, u32 *div) mux_div_get_src_div() argument
91 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_determine_rate() local
127 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); __mux_div_set_rate_and_parent() local
165 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_get_parent() local
181 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_set_parent() local
189 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_set_rate() local
197 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_set_rate_and_parent() local
205 struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); mux_div_recalc_rate() local
[all...]
/kernel/linux/linux-5.10/arch/mips/pci/
msi-xlp.c
134 struct xlp_msi_data *md = irq_data_get_irq_chip_data(d); in xlp_msi_enable() local
139 spin_lock_irqsave(&md->msi_lock, flags); in xlp_msi_enable()
140 md->msi_enabled_mask |= 1u << vec; in xlp_msi_enable()
142 nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, in xlp_msi_enable()
143 md->msi_enabled_mask); in xlp_msi_enable()
145 nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); in xlp_msi_enable()
146 spin_unlock_irqrestore(&md->msi_lock, flags); in xlp_msi_enable()
151 struct xlp_msi_data *md = irq_data_get_irq_chip_data(d); in xlp_msi_disable() local
156 spin_lock_irqsave(&md in xlp_msi_disable()
168 struct xlp_msi_data *md = irq_data_get_irq_chip_data(d); xlp_msi_mask_ack() local
207 struct xlp_msi_data *md; xlp_msix_mask_ack() local
296 struct xlp_msi_data *md; xlp_setup_msi() local
403 struct xlp_msi_data *md; xlp_setup_msix() local
469 struct xlp_msi_data *md; xlp_init_node_msi_irqs() local
517 struct xlp_msi_data *md; nlm_dispatch_msi() local
547 struct xlp_msi_data *md; nlm_dispatch_msix() local
[all...]
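
The msi-xlp.c hits show xlp_msi_enable() keeping a cached enable mask in md: the bit for the vector is set under a lock and the whole mask is then written back to the controller register. The sketch below is a userspace illustration of that cache-then-write-back pattern only: a pthread mutex stands in for spin_lock_irqsave(), the "register" is a plain int, and all *_sketch names are invented.

/* sketch of "update cached enable mask under a lock, then write it to hardware" */
#include <stdio.h>
#include <pthread.h>

struct xlp_msi_data_sketch {
	pthread_mutex_t msi_lock;
	unsigned int msi_enabled_mask;
	unsigned int hw_msi_en_reg;	/* stands in for the PCIe MSI enable register */
};

static void xlp_msi_enable_sketch(struct xlp_msi_data_sketch *md, int vec)
{
	pthread_mutex_lock(&md->msi_lock);
	md->msi_enabled_mask |= 1u << vec;
	md->hw_msi_en_reg = md->msi_enabled_mask;	/* nlm_write_reg(...) in the driver */
	pthread_mutex_unlock(&md->msi_lock);
}

int main(void)
{
	struct xlp_msi_data_sketch md = {
		.msi_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	xlp_msi_enable_sketch(&md, 3);
	xlp_msi_enable_sketch(&md, 7);
	printf("MSI_EN = 0x%x\n", md.hw_msi_en_reg);
	return 0;
}
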
/kernel/linux/linux-5.10/arch/arm64/kernel/
efi.c
15 static bool region_is_misaligned(const efi_memory_desc_t *md) in region_is_misaligned() argument
19 return !PAGE_ALIGNED(md->phys_addr) || in region_is_misaligned()
20 !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT); in region_is_misaligned()
28 static __init pteval_t create_mapping_protection(efi_memory_desc_t *md) in create_mapping_protection() argument
30 u64 attr = md->attribute; in create_mapping_protection()
31 u32 type = md->type; in create_mapping_protection()
36 if (region_is_misaligned(md)) { in create_mapping_protection()
75 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) in efi_create_mapping() argument
77 pteval_t prot_val = create_mapping_protection(md); in efi_create_mapping()
78 bool page_mappings_only = (md in efi_create_mapping()
100 efi_memory_desc_t *md = data; set_permissions() local
111 efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md) efi_set_mapping_permissions() argument
[all...]

Completed in 27 milliseconds
