Lines Matching refs:mds
123 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
131 mutex_lock(&mds->mbox_mutex);
132 if (mds->security.sanitize_node)
133 mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
134 mutex_unlock(&mds->mbox_mutex);
137 rcuwait_wake_up(&mds->mbox_wait);
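These first matches (123-137) appear to sit in the mailbox interrupt handler of the CXL PCI driver (drivers/cxl/pci.c in mainline). A minimal sketch of how they fit together; the handler name cxl_pci_mbox_irq(), the cxl_dev_id cookie, the background-status register read, and cxl_mbox_background_complete() are assumptions not shown in the matches:

    static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
    {
            u64 reg;
            u16 opcode;
            struct cxl_dev_id *dev_id = id;                 /* assumed irq cookie type */
            struct cxl_dev_state *cxlds = dev_id->cxlds;
            struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

            if (!cxl_mbox_background_complete(cxlds))       /* assumed helper */
                    return IRQ_NONE;

            reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
            opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
            if (opcode == CXL_MBOX_OP_SANITIZE) {
                    /* sanitize completion is reported through the polling worker */
                    mutex_lock(&mds->mbox_mutex);
                    if (mds->security.sanitize_node)
                            mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
                    mutex_unlock(&mds->mbox_mutex);
            } else {
                    /* short-circuit the rcuwait in the mailbox send path (line 314) */
                    rcuwait_wake_up(&mds->mbox_wait);
            }

            return IRQ_HANDLED;
    }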
148 struct cxl_memdev_state *mds =
149 container_of(work, typeof(*mds), security.poll_dwork.work);
150 struct cxl_dev_state *cxlds = &mds->cxlds;
152 mutex_lock(&mds->mbox_mutex);
154 mds->security.poll_tmo_secs = 0;
155 if (mds->security.sanitize_node)
156 sysfs_notify_dirent(mds->security.sanitize_node);
157 mds->security.sanitize_active = false;
161 int timeout = mds->security.poll_tmo_secs + 10;
163 mds->security.poll_tmo_secs = min(15 * 60, timeout);
164 schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
166 mutex_unlock(&mds->mbox_mutex);
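Lines 148-166 are nearly the entire sanitize polling worker; only the branch condition is missing from the matches. A sketch assuming that condition is a background-completion check (cxl_mbox_background_complete() here is an assumption):

    static void cxl_mbox_sanitize_work(struct work_struct *work)
    {
            struct cxl_memdev_state *mds =
                    container_of(work, typeof(*mds), security.poll_dwork.work);
            struct cxl_dev_state *cxlds = &mds->cxlds;

            mutex_lock(&mds->mbox_mutex);
            if (cxl_mbox_background_complete(cxlds)) {      /* assumed helper */
                    mds->security.poll_tmo_secs = 0;
                    if (mds->security.sanitize_node)
                            sysfs_notify_dirent(mds->security.sanitize_node);
                    mds->security.sanitize_active = false;
            } else {
                    /* back off 10s per retry, capped at 15 minutes */
                    int timeout = mds->security.poll_tmo_secs + 10;

                    mds->security.poll_tmo_secs = min(15 * 60, timeout);
                    schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
            }
            mutex_unlock(&mds->mbox_mutex);
    }

Note the worker reschedules itself with a growing interval rather than a fixed period, so a long-running sanitize settles into a 15-minute poll.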
171 * @mds: The memory device driver data
191 static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
194 struct cxl_dev_state *cxlds = &mds->cxlds;
201 lockdep_assert_held(&mds->mbox_mutex);
235 if (mds->security.poll_tmo_secs > 0) {
296 if (mds->security.sanitize_active)
301 mds->security.poll_tmo_secs = timeout;
302 mds->security.sanitize_active = true;
303 schedule_delayed_work(&mds->security.poll_dwork,
314 if (rcuwait_wait_event_timeout(&mds->mbox_wait,
359 n = min3(mbox_cmd->size_out, mds->payload_size, out_len);
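The matches at 296-314 are the background-command handling inside __cxl_pci_mbox_send_cmd(): sanitize is handed off to the polling worker, while other background commands sleep on mds->mbox_wait until the irq handler (line 137) wakes them. A fragmentary sketch of the two branches; the rcuwait condition, the timeout units, and the initial timeout value are assumptions:

    /* sanitize: hand off to the polling worker, only one in flight */
    if (mds->security.sanitize_active)
            return -EBUSY;
    mds->security.poll_tmo_secs = timeout;          /* assumed initial value */
    mds->security.sanitize_active = true;
    schedule_delayed_work(&mds->security.poll_dwork,
                          timeout * HZ);

    /* other background commands: sleep until the irq or the timeout */
    if (rcuwait_wait_event_timeout(&mds->mbox_wait,
                                   cxl_mbox_background_complete(cxlds),
                                   TASK_UNINTERRUPTIBLE,
                                   msecs_to_jiffies(timeout)) > 0)
            dev_dbg(cxlds->dev, "background command completed\n");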
369 static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
374 mutex_lock_io(&mds->mbox_mutex);
375 rc = __cxl_pci_mbox_send_cmd(mds, cmd);
376 mutex_unlock(&mds->mbox_mutex);
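Lines 369-376 give the locked send wrapper almost verbatim; reconstructed with only the rc declaration and return added:

    static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
                                 struct cxl_mbox_cmd *cmd)
    {
            int rc;

            /* mutex_lock_io() accounts the wait as I/O wait */
            mutex_lock_io(&mds->mbox_mutex);
            rc = __cxl_pci_mbox_send_cmd(mds, cmd);
            mutex_unlock(&mds->mbox_mutex);

            return rc;
    }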
381 static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
383 struct cxl_dev_state *cxlds = &mds->cxlds;
416 mds->mbox_send = cxl_pci_mbox_send;
417 mds->payload_size =
427 mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
428 if (mds->payload_size < 256) {
430 mds->payload_size);
434 dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
436 rcuwait_init(&mds->mbox_wait);
437 INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
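Lines 416-437 are the tail of cxl_pci_setup_mailbox(): derive the payload size from the capability register, clamp it to the spec's 256-byte..1MB window, then initialize the wait object and the sanitize worker. A sketch; the cap variable, the field mask name, and the -ENXIO error path are assumptions:

    mds->mbox_send = cxl_pci_mbox_send;
    mds->payload_size =
            1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); /* assumed */

    /* spec bounds: at least 256 bytes, clamp anything above 1MB */
    mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
    if (mds->payload_size < 256) {
            dev_err(dev, "Mailbox is too small (%zu)",
                    mds->payload_size);
            return -ENXIO;                          /* assumed errno */
    }

    dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);

    rcuwait_init(&mds->mbox_wait);
    INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);

    return 0;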
573 * share this buffer protected by the mds->event_log_lock.
575 static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
579 buf = kvmalloc(mds->payload_size, GFP_KERNEL);
582 mds->event.buf = buf;
584 return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
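Lines 575-584 are almost the whole event-buffer allocator. Reconstructed, assuming the payload type and a trivial kvfree() callback named free_event_buf() (the devm call at 584 implies one exists):

    static void free_event_buf(void *buf)
    {
            kvfree(buf);
    }

    static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
    {
            struct cxl_get_event_payload *buf;      /* assumed payload type */

            buf = kvmalloc(mds->payload_size, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            mds->event.buf = buf;

            /* released automatically when the driver unbinds */
            return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
    }

kvmalloc() is the right call here because payload_size can be as large as 1MB (line 427), beyond what kmalloc reliably serves.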
613 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
626 cxl_mem_get_event_records(mds, status);
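The matches at 613-626 fall in the threaded event interrupt handler, which drains records while any status bit stays set. A sketch; the handler name, the status register offset, and the loop shape are assumptions beyond the two matched lines:

    static irqreturn_t cxl_event_thread(int irq, void *id)
    {
            struct cxl_dev_id *dev_id = id;         /* assumed irq cookie */
            struct cxl_dev_state *cxlds = dev_id->cxlds;
            struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
            u32 status;

            do {
                    status = readl(cxlds->regs.status +
                                   CXLDEV_DEV_EVENT_STATUS_OFFSET);
                    status &= CXLDEV_EVENT_STATUS_ALL;
                    if (!status)
                            break;
                    cxl_mem_get_event_records(mds, status);
                    cond_resched();
            } while (status);

            return IRQ_HANDLED;
    }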
649 static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
659 rc = cxl_internal_send_cmd(mds, &mbox_cmd);
661 dev_err(mds->cxlds.dev,
667 static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
686 rc = cxl_internal_send_cmd(mds, &mbox_cmd);
688 dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d",
694 return cxl_event_get_int_policy(mds, policy);
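Lines 667-694 set the event interrupt policy and then read it back to see what the device actually accepted. A sketch of cxl_event_config_msgnums(); the opcode name and payload wiring are assumptions around the matched calls:

    static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
                                        struct cxl_event_interrupt_policy *policy)
    {
            struct cxl_mbox_cmd mbox_cmd;
            int rc;

            mbox_cmd = (struct cxl_mbox_cmd) {
                    .opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,       /* assumed */
                    .payload_in = policy,
                    .size_in = sizeof(*policy),
            };

            rc = cxl_internal_send_cmd(mds, &mbox_cmd);
            if (rc < 0) {
                    dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d",
                            rc);
                    return rc;
            }

            /* read back the settings the device actually applied */
            return cxl_event_get_int_policy(mds, policy);
    }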
697 static int cxl_event_irqsetup(struct cxl_memdev_state *mds)
699 struct cxl_dev_state *cxlds = &mds->cxlds;
703 rc = cxl_event_config_msgnums(mds, &policy);
742 struct cxl_memdev_state *mds)
754 rc = cxl_mem_alloc_event_buf(mds);
758 rc = cxl_event_get_int_policy(mds, &policy);
766 dev_err(mds->cxlds.dev,
771 rc = cxl_event_irqsetup(mds);
775 cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
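The matches at 742-775 outline cxl_event_config() end to end: allocate the shared buffer, fetch the current policy, reject firmware-owned logs, wire up the interrupts, then flush anything that queued before they were live. A sketch of that ordering; the native_cxl_error gate, cxl_event_int_is_fw(), and the error message are assumptions:

    static int cxl_event_config(struct pci_host_bridge *host_bridge,
                                struct cxl_memdev_state *mds)
    {
            struct cxl_event_interrupt_policy policy;
            int rc;

            /* if platform firmware retained error control, it owns the logs */
            if (!host_bridge->native_cxl_error)     /* assumed gate */
                    return 0;

            rc = cxl_mem_alloc_event_buf(mds);
            if (rc)
                    return rc;

            rc = cxl_event_get_int_policy(mds, &policy);
            if (rc)
                    return rc;

            if (cxl_event_int_is_fw(policy.info_settings)) {        /* assumed helper */
                    dev_err(mds->cxlds.dev,
                            "FW still in control of event logs\n");
                    return -EBUSY;
            }

            rc = cxl_event_irqsetup(mds);
            if (rc)
                    return rc;

            /* drain records that arrived before interrupts were enabled */
            cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

            return 0;
    }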
783 struct cxl_memdev_state *mds;
801 mds = cxl_memdev_state_create(&pdev->dev);
802 if (IS_ERR(mds))
803 return PTR_ERR(mds);
804 cxlds = &mds->cxlds;
851 rc = cxl_pci_setup_mailbox(mds);
855 rc = cxl_enumerate_cmds(mds);
859 rc = cxl_set_timestamp(mds);
863 rc = cxl_poison_state_init(mds);
867 rc = cxl_dev_state_identify(mds);
871 rc = cxl_mem_create_range_info(mds);
879 rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
910 rc = cxl_event_config(host_bridge, mds);
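Lines 783-910 follow mds through cxl_pci_probe(). Every setup step is devm-managed and uses the same early-return idiom, so the probe body reduces to a pipeline; a condensed sketch (unrelated steps between the matches are elided, and each `if (rc) return rc;` is assumed from the pattern):

    mds = cxl_memdev_state_create(&pdev->dev);
    if (IS_ERR(mds))
            return PTR_ERR(mds);
    cxlds = &mds->cxlds;

    rc = cxl_pci_setup_mailbox(mds);
    if (rc)
            return rc;
    rc = cxl_enumerate_cmds(mds);
    if (rc)
            return rc;
    rc = cxl_set_timestamp(mds);
    if (rc)
            return rc;
    rc = cxl_poison_state_init(mds);
    if (rc)
            return rc;
    rc = cxl_dev_state_identify(mds);
    if (rc)
            return rc;
    rc = cxl_mem_create_range_info(mds);
    if (rc)
            return rc;
    rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
    if (rc)
            return rc;
    rc = cxl_event_config(host_bridge, mds);
    if (rc)
            return rc;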