Lines matching refs: intf (IPMI message handler, drivers/char/ipmi/ipmi_msghandler.c)
44 static void handle_new_recv_msgs(struct ipmi_smi *intf);
45 static void need_waiter(struct ipmi_smi *intf);
46 static int handle_one_recv_msg(struct ipmi_smi *intf,
197 struct ipmi_smi *intf;
328 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
576 void (*null_user_handler)(struct ipmi_smi *intf,
603 static void __get_guid(struct ipmi_smi *intf);
604 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
605 static int __ipmi_bmc_register(struct ipmi_smi *intf,
608 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
637 #define ipmi_inc_stat(intf, stat) \
638 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
639 #define ipmi_get_stat(intf, stat) \
640 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
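
The two macros above use `##` token pasting to turn a short stat name into an IPMI_STAT_* enum index. A minimal userspace sketch of the same pattern (the plain-int stats array and demo_* names are stand-ins; the kernel uses atomic_t):

    #include <stdio.h>

    /* Stand-in stat index and interface struct. */
    enum { IPMI_STAT_sent_ipmb_commands, IPMI_NUM_STATS };

    struct demo_intf { int stats[IPMI_NUM_STATS]; };

    /* Same token-pasting trick as ipmi_inc_stat()/ipmi_get_stat(),
     * minus the atomics. */
    #define demo_inc_stat(intf, stat) ((intf)->stats[IPMI_STAT_ ## stat]++)
    #define demo_get_stat(intf, stat) ((intf)->stats[IPMI_STAT_ ## stat])

    int main(void)
    {
        struct demo_intf i = { { 0 } };

        demo_inc_stat(&i, sent_ipmb_commands);
        printf("%d\n", demo_get_stat(&i, sent_ipmb_commands)); /* 1 */
        return 0;
    }
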
690 static void clean_up_interface_data(struct ipmi_smi *intf)
696 tasklet_kill(&intf->recv_tasklet);
698 free_smi_msg_list(&intf->waiting_rcv_msgs);
699 free_recv_msg_list(&intf->waiting_events);
705 mutex_lock(&intf->cmd_rcvrs_mutex);
707 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
708 mutex_unlock(&intf->cmd_rcvrs_mutex);
714 if ((intf->seq_table[i].inuse)
715 && (intf->seq_table[i].recv_msg))
716 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
722 struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
724 clean_up_interface_data(intf);
725 kfree(intf);
730 struct ipmi_smi *intf;
736 struct ipmi_smi *intf;
752 list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
754 int intf_num = READ_ONCE(intf->intf_num);
758 watcher->new_smi(intf_num, intf->si_dev);
886 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
892 if (intf->null_user_handler) {
893 intf->null_user_handler(intf, msg);
923 static void deliver_local_response(struct ipmi_smi *intf,
926 if (deliver_response(intf, msg))
927 ipmi_inc_stat(intf, unhandled_local_responses);
929 ipmi_inc_stat(intf, handled_local_responses);
932 static void deliver_err_response(struct ipmi_smi *intf,
940 deliver_local_response(intf, msg);
943 static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
947 if (!intf->handlers->set_need_watch)
950 spin_lock_irqsave(&intf->watch_lock, iflags);
952 intf->response_waiters++;
955 intf->watchdog_waiters++;
958 intf->command_waiters++;
960 if ((intf->last_watch_mask & flags) != flags) {
961 intf->last_watch_mask |= flags;
962 intf->handlers->set_need_watch(intf->send_info,
963 intf->last_watch_mask);
965 spin_unlock_irqrestore(&intf->watch_lock, iflags);
968 static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
972 if (!intf->handlers->set_need_watch)
975 spin_lock_irqsave(&intf->watch_lock, iflags);
977 intf->response_waiters--;
980 intf->watchdog_waiters--;
983 intf->command_waiters--;
986 if (intf->response_waiters)
988 if (intf->watchdog_waiters)
990 if (intf->command_waiters)
993 if (intf->last_watch_mask != flags) {
994 intf->last_watch_mask = flags;
995 intf->handlers->set_need_watch(intf->send_info,
996 intf->last_watch_mask);
998 spin_unlock_irqrestore(&intf->watch_lock, iflags);
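
smi_add_watch()/smi_remove_watch() keep one counter per wait reason and call the driver's set_need_watch() only when the combined mask actually changes, so the lower layer sees mask transitions rather than every caller. A stand-alone sketch of that pattern (single-threaded, so the watch_lock spinlock is omitted; the flag values are illustrative):

    #include <stdio.h>

    #define WATCH_MESSAGES 0x1
    #define WATCH_WATCHDOG 0x2

    static int response_waiters, watchdog_waiters;
    static unsigned int last_watch_mask;

    static void set_need_watch(unsigned int mask)   /* stand-in callback */
    {
        printf("set_need_watch(0x%x)\n", mask);
    }

    static void add_watch(unsigned int flags)
    {
        if (flags & WATCH_MESSAGES)
            response_waiters++;
        if (flags & WATCH_WATCHDOG)
            watchdog_waiters++;
        if ((last_watch_mask & flags) != flags) {   /* new bits only */
            last_watch_mask |= flags;
            set_need_watch(last_watch_mask);
        }
    }

    static void remove_watch(unsigned int flags)
    {
        unsigned int mask = 0;

        if (flags & WATCH_MESSAGES)
            response_waiters--;
        if (flags & WATCH_WATCHDOG)
            watchdog_waiters--;

        /* Rebuild the mask from the surviving waiters. */
        if (response_waiters)
            mask |= WATCH_MESSAGES;
        if (watchdog_waiters)
            mask |= WATCH_WATCHDOG;
        if (last_watch_mask != mask) {
            last_watch_mask = mask;
            set_need_watch(last_watch_mask);
        }
    }

    int main(void)
    {
        add_watch(WATCH_MESSAGES);      /* reports 0x1 */
        add_watch(WATCH_MESSAGES);      /* no transition, silent */
        remove_watch(WATCH_MESSAGES);   /* one waiter left, silent */
        remove_watch(WATCH_MESSAGES);   /* reports 0x0 */
        return 0;
    }
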
1006 static int intf_next_seq(struct ipmi_smi *intf,
1022 for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1024 if (!intf->seq_table[i].inuse)
1028 if (!intf->seq_table[i].inuse) {
1029 intf->seq_table[i].recv_msg = recv_msg;
1035 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1036 intf->seq_table[i].orig_timeout = timeout;
1037 intf->seq_table[i].retries_left = retries;
1038 intf->seq_table[i].broadcast = broadcast;
1039 intf->seq_table[i].inuse = 1;
1040 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1042 *seqid = intf->seq_table[i].seqid;
1043 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1044 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1045 need_waiter(intf);
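
intf_next_seq() scans the sequence table starting at curr_seq, takes the first free slot, and pairs the slot index with a per-slot generation counter (seqid) so a stale response cannot match a recycled slot. A toy model of that scan; the 64-slot size and the NEXT_SEQID wrap mask are assumptions here, and the timeout/retry bookkeeping is left out:

    #include <stdio.h>

    #define NUM_SEQ 64                          /* IPMI_IPMB_NUM_SEQ (assumed) */
    #define NEXT_SEQID(s) (((s) + 1) & 0x3fffffff)

    struct slot { int inuse; unsigned int seqid; };

    static struct slot seq_table[NUM_SEQ];
    static unsigned int curr_seq;

    static int next_seq(unsigned char *seq, unsigned int *seqid)
    {
        unsigned int i;

        /* Scan from curr_seq; the loop stops one slot short, and that
         * last slot is covered by the test below, as in the original. */
        for (i = curr_seq; (i + 1) % NUM_SEQ != curr_seq;
                           i = (i + 1) % NUM_SEQ)
            if (!seq_table[i].inuse)
                break;

        if (seq_table[i].inuse)
            return -1;                          /* table full */

        seq_table[i].inuse = 1;
        seq_table[i].seqid = NEXT_SEQID(seq_table[i].seqid);
        *seq = i;
        *seqid = seq_table[i].seqid;
        curr_seq = (i + 1) % NUM_SEQ;           /* spread future picks */
        return 0;
    }

    int main(void)
    {
        unsigned char seq;
        unsigned int seqid;
        int n = 0;

        while (next_seq(&seq, &seqid) == 0)
            n++;
        printf("allocated %d slots before the table filled\n", n); /* 64 */
        return 0;
    }
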
1060 static int intf_find_seq(struct ipmi_smi *intf,
1074 spin_lock_irqsave(&intf->seq_lock, flags);
1075 if (intf->seq_table[seq].inuse) {
1076 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1082 intf->seq_table[seq].inuse = 0;
1083 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1087 spin_unlock_irqrestore(&intf->seq_lock, flags);
1094 static int intf_start_seq_timer(struct ipmi_smi *intf,
1105 spin_lock_irqsave(&intf->seq_lock, flags);
1110 if ((intf->seq_table[seq].inuse)
1111 && (intf->seq_table[seq].seqid == seqid)) {
1112 struct seq_table *ent = &intf->seq_table[seq];
1116 spin_unlock_irqrestore(&intf->seq_lock, flags);
1122 static int intf_err_seq(struct ipmi_smi *intf,
1135 spin_lock_irqsave(&intf->seq_lock, flags);
1140 if ((intf->seq_table[seq].inuse)
1141 && (intf->seq_table[seq].seqid == seqid)) {
1142 struct seq_table *ent = &intf->seq_table[seq];
1145 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1149 spin_unlock_irqrestore(&intf->seq_lock, flags);
1152 deliver_err_response(intf, msg, err);
1174 struct ipmi_smi *intf;
1200 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1201 if (intf->intf_num == if_num)
1215 if (!try_module_get(intf->owner)) {
1221 kref_get(&intf->refcount);
1226 new_user->intf = intf;
1230 spin_lock_irqsave(&intf->seq_lock, flags);
1231 list_add_rcu(&new_user->link, &intf->users);
1232 spin_unlock_irqrestore(&intf->seq_lock, flags);
1235 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1250 struct ipmi_smi *intf;
1253 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1254 if (intf->intf_num == if_num)
1263 if (!intf->handlers->get_smi_info)
1266 rv = intf->handlers->get_smi_info(intf->send_info, data);
1283 struct ipmi_smi *intf = user->intf;
1308 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1311 atomic_dec(&intf->event_waiters);
1314 spin_lock_irqsave(&intf->seq_lock, flags);
1318 if (intf->seq_table[i].inuse
1319 && (intf->seq_table[i].recv_msg->user == user)) {
1320 intf->seq_table[i].inuse = 0;
1321 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1322 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1325 spin_unlock_irqrestore(&intf->seq_lock, flags);
1333 mutex_lock(&intf->cmd_rcvrs_mutex);
1334 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1335 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1342 mutex_unlock(&intf->cmd_rcvrs_mutex);
1350 owner = intf->owner;
1351 kref_put(&intf->refcount, intf_free);
1376 rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1401 user->intf->addrinfo[channel].address = address;
1423 *address = user->intf->addrinfo[channel].address;
1445 user->intf->addrinfo[channel].lun = LUN & 0x3;
1467 *address = user->intf->addrinfo[channel].lun;
1484 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1485 mode = user->intf->maintenance_mode;
1486 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1493 static void maintenance_mode_update(struct ipmi_smi *intf)
1495 if (intf->handlers->set_maintenance_mode)
1496 intf->handlers->set_maintenance_mode(
1497 intf->send_info, intf->maintenance_mode_enable);
1504 struct ipmi_smi *intf = user->intf;
1510 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1511 if (intf->maintenance_mode != mode) {
1514 intf->maintenance_mode_enable
1515 = (intf->auto_maintenance_timeout > 0);
1519 intf->maintenance_mode_enable = false;
1523 intf->maintenance_mode_enable = true;
1530 intf->maintenance_mode = mode;
1532 maintenance_mode_update(intf);
1535 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1545 struct ipmi_smi *intf = user->intf;
1556 spin_lock_irqsave(&intf->events_lock, flags);
1563 if (atomic_inc_return(&intf->event_waiters) == 1)
1564 need_waiter(intf);
1566 atomic_dec(&intf->event_waiters);
1569 if (intf->delivering_events)
1577 while (user->gets_events && !list_empty(&intf->waiting_events)) {
1578 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1580 intf->waiting_events_count = 0;
1581 if (intf->event_msg_printed) {
1582 dev_warn(intf->si_dev, "Event queue no longer full\n");
1583 intf->event_msg_printed = 0;
1586 intf->delivering_events = 1;
1587 spin_unlock_irqrestore(&intf->events_lock, flags);
1592 deliver_local_response(intf, msg);
1595 spin_lock_irqsave(&intf->events_lock, flags);
1596 intf->delivering_events = 0;
1600 spin_unlock_irqrestore(&intf->events_lock, flags);
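
The gets_events path above drains waiting_events by moving it to a private list under events_lock, marking delivering_events so a re-entrant caller backs off, and then invoking user callbacks with the lock dropped. The same shape in stand-alone C (a pthread mutex stands in for the spinlock/irqsave pair; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct msg { struct msg *next; int id; };

    static pthread_mutex_t events_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct msg *waiting_events;
    static int delivering_events;

    static void deliver(struct msg *m)
    {
        printf("event %d\n", m->id);    /* user callback would run here */
        free(m);
    }

    static void drain_events(void)
    {
        struct msg *list, *m;

        pthread_mutex_lock(&events_lock);
        if (delivering_events) {        /* someone is already draining */
            pthread_mutex_unlock(&events_lock);
            return;
        }
        list = waiting_events;          /* steal the whole queue */
        waiting_events = NULL;
        delivering_events = 1;
        pthread_mutex_unlock(&events_lock);

        while ((m = list) != NULL) {    /* callbacks run unlocked */
            list = m->next;
            deliver(m);
        }

        pthread_mutex_lock(&events_lock);
        delivering_events = 0;
        pthread_mutex_unlock(&events_lock);
    }

    int main(void)
    {
        for (int i = 0; i < 2; i++) {
            struct msg *m = malloc(sizeof(*m));
            m->id = i;
            m->next = waiting_events;
            waiting_events = m;
        }
        drain_events();
        return 0;
    }
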
1607 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1614 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1615 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1623 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1630 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1631 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1644 struct ipmi_smi *intf = user->intf;
1662 mutex_lock(&intf->cmd_rcvrs_mutex);
1664 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1669 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1671 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1674 mutex_unlock(&intf->cmd_rcvrs_mutex);
1689 struct ipmi_smi *intf = user->intf;
1698 mutex_lock(&intf->cmd_rcvrs_mutex);
1702 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1715 mutex_unlock(&intf->cmd_rcvrs_mutex);
1719 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
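
Command receivers are matched on (netfn, cmd, channel bitmask), and is_cmd_rcvr_exclusive() above boils down to: two receivers may share a netfn/cmd pair only if their channel masks do not intersect. That core test in isolation (the registration values below are made up):

    #include <stdbool.h>
    #include <stdio.h>

    struct rcvr { unsigned char netfn, cmd; unsigned int chans; };

    /* True if the two registrations would fight over a channel. */
    static bool conflicts(const struct rcvr *a, const struct rcvr *b)
    {
        return a->netfn == b->netfn && a->cmd == b->cmd &&
               (a->chans & b->chans) != 0;
    }

    int main(void)
    {
        struct rcvr wd  = { 0x06, 0x22, 0x1 };  /* app netfn, channel 0 */
        struct rcvr lan = { 0x06, 0x22, 0x2 };  /* same cmd, channel 1 */

        printf("%s\n", conflicts(&wd, &lan) ? "conflict" : "ok"); /* ok */
        return 0;
    }
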
1819 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1823 if (intf->curr_msg) {
1825 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1827 list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1830 intf->curr_msg = smi_msg;
1836 static void smi_send(struct ipmi_smi *intf,
1840 int run_to_completion = intf->run_to_completion;
1844 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1845 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1848 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1851 handlers->sender(intf->send_info, smi_msg);
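
smi_send()/smi_add_send_msg() implement a two-level transmit path: priority > 0 messages queue on hp_xmit_msgs, others on xmit_msgs, and a message is handed to the driver's sender() only when nothing is in flight (curr_msg). Sketched with fixed-size arrays instead of list_heads, and with the xmit_msgs_lock omitted:

    #include <stdio.h>

    struct smi_msg { int id; };

    static struct smi_msg *curr_msg;            /* message in flight */
    static struct smi_msg *hp_q[16], *lo_q[16];
    static int hp_len, lo_len;

    /* Returns the message to hand to the driver, or NULL if it queued. */
    static struct smi_msg *add_send_msg(struct smi_msg *m, int priority)
    {
        if (curr_msg) {
            if (priority > 0)
                hp_q[hp_len++] = m;
            else
                lo_q[lo_len++] = m;
            return NULL;
        }
        curr_msg = m;
        return m;
    }

    static void sender(struct smi_msg *m)       /* stand-in driver hook */
    {
        printf("sending msg %d\n", m->id);
    }

    static void send_sketch(struct smi_msg *m, int priority)
    {
        struct smi_msg *out = add_send_msg(m, priority);

        if (out)
            sender(out);
    }

    int main(void)
    {
        struct smi_msg a = { 1 }, b = { 2 };

        send_sketch(&a, 0);     /* idle: goes straight out */
        send_sketch(&b, 1);     /* busy: parks on the hp queue */
        return 0;
    }
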
1862 static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
1879 ipmi_inc_stat(intf, sent_invalid_commands);
1893 ipmi_inc_stat(intf, sent_invalid_commands);
1900 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1901 intf->auto_maintenance_timeout
1903 if (!intf->maintenance_mode
1904 && !intf->maintenance_mode_enable) {
1905 intf->maintenance_mode_enable = true;
1906 maintenance_mode_update(intf);
1908 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1913 ipmi_inc_stat(intf, sent_invalid_commands);
1924 ipmi_inc_stat(intf, sent_local_commands);
1929 static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
1948 ipmi_inc_stat(intf, sent_invalid_commands);
1952 chans = READ_ONCE(intf->channel_list)->c;
1955 ipmi_inc_stat(intf, sent_invalid_commands);
1975 ipmi_inc_stat(intf, sent_invalid_commands);
1981 ipmi_inc_stat(intf, sent_invalid_commands);
1992 ipmi_inc_stat(intf, sent_ipmb_responses);
2006 spin_lock_irqsave(&intf->seq_lock, flags);
2009 intf->ipmb_maintenance_mode_timeout =
2012 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2020 rv = intf_next_seq(intf,
2034 ipmi_inc_stat(intf, sent_ipmb_commands);
2064 spin_unlock_irqrestore(&intf->seq_lock, flags);
2070 static int i_ipmi_req_lan(struct ipmi_smi *intf,
2087 ipmi_inc_stat(intf, sent_invalid_commands);
2091 chans = READ_ONCE(intf->channel_list)->c;
2097 ipmi_inc_stat(intf, sent_invalid_commands);
2103 ipmi_inc_stat(intf, sent_invalid_commands);
2109 ipmi_inc_stat(intf, sent_invalid_commands);
2120 ipmi_inc_stat(intf, sent_lan_responses);
2133 spin_lock_irqsave(&intf->seq_lock, flags);
2139 rv = intf_next_seq(intf,
2153 ipmi_inc_stat(intf, sent_lan_commands);
2182 spin_unlock_irqrestore(&intf->seq_lock, flags);
2195 struct ipmi_smi *intf,
2236 if (intf->in_shutdown) {
2253 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2256 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2260 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2264 ipmi_inc_stat(intf, sent_invalid_commands);
2275 smi_send(intf, intf->handlers, smi_msg, priority);
2283 static int check_addr(struct ipmi_smi *intf,
2291 *lun = intf->addrinfo[addr->channel].lun;
2292 *saddr = intf->addrinfo[addr->channel].address;
2315 rv = check_addr(user->intf, addr, &saddr, &lun);
2318 user->intf,
2354 rv = check_addr(user->intf, addr, &saddr, &lun);
2357 user->intf,
2374 static void bmc_device_id_handler(struct ipmi_smi *intf,
2382 dev_warn(intf->si_dev,
2389 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2391 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2393 intf->bmc->cc = msg->msg.data[0];
2394 intf->bmc->dyn_id_set = 0;
2401 intf->bmc->dyn_id_set = 1;
2404 wake_up(&intf->waitq);
2408 send_get_device_id_cmd(struct ipmi_smi *intf)
2423 intf,
2427 intf,
2431 intf->addrinfo[0].address,
2432 intf->addrinfo[0].lun,
2436 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2441 intf->null_user_handler = bmc_device_id_handler;
2447 rv = send_get_device_id_cmd(intf);
2451 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2459 dev_warn(intf->si_dev,
2472 intf->null_user_handler = NULL;
2479 * bmc or intf, this code will get the other one. If the data has
2486 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2492 bool intf_set = intf != NULL;
2494 if (!intf) {
2501 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2503 kref_get(&intf->refcount);
2505 mutex_lock(&intf->bmc_reg_mutex);
2507 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2509 mutex_unlock(&intf->bmc_reg_mutex);
2510 kref_put(&intf->refcount, intf_free);
2514 mutex_lock(&intf->bmc_reg_mutex);
2515 bmc = intf->bmc;
2517 kref_get(&intf->refcount);
2521 if (intf->in_bmc_register ||
2526 __get_guid(intf);
2529 rv = __get_device_id(intf, bmc);
2537 if (!intf->bmc_registered
2552 __ipmi_bmc_unregister(intf);
2554 intf->bmc->id = id;
2555 intf->bmc->dyn_guid_set = guid_set;
2556 intf->bmc->guid = guid;
2557 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2558 need_waiter(intf); /* Retry later on an error. */
2560 __scan_channels(intf, &id);
2569 mutex_unlock(&intf->bmc_reg_mutex);
2575 bmc = intf->bmc;
2580 __scan_channels(intf, &bmc->fetch_id);
2613 mutex_unlock(&intf->bmc_reg_mutex);
2615 kref_put(&intf->refcount, intf_free);
2619 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2623 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2955 * Must be called with intf->bmc_reg_mutex held.
2957 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2959 struct bmc_device *bmc = intf->bmc;
2961 if (!intf->bmc_registered)
2964 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2965 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2966 kfree(intf->my_dev_name);
2967 intf->my_dev_name = NULL;
2970 list_del(&intf->bmc_link);
2972 intf->bmc = &intf->tmp_bmc;
2974 intf->bmc_registered = false;
2977 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2979 mutex_lock(&intf->bmc_reg_mutex);
2980 __ipmi_bmc_unregister(intf);
2981 mutex_unlock(&intf->bmc_reg_mutex);
2985 * Must be called with intf->bmc_reg_mutex held.
2987 static int __ipmi_bmc_register(struct ipmi_smi *intf,
3001 intf->in_bmc_register = true;
3002 mutex_unlock(&intf->bmc_reg_mutex);
3026 intf->bmc = old_bmc;
3028 list_add_tail(&intf->bmc_link, &bmc->intfs);
3031 dev_info(intf->si_dev,
3066 intf->bmc = bmc;
3068 list_add_tail(&intf->bmc_link, &bmc->intfs);
3073 dev_err(intf->si_dev,
3079 dev_info(intf->si_dev,
3090 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3092 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3097 intf_num = intf->intf_num;
3098 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3099 if (!intf->my_dev_name) {
3101 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3106 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3107 intf->my_dev_name);
3109 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3114 intf->bmc_registered = true;
3118 mutex_lock(&intf->bmc_reg_mutex);
3119 intf->in_bmc_register = false;
3124 kfree(intf->my_dev_name);
3125 intf->my_dev_name = NULL;
3128 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3132 list_del(&intf->bmc_link);
3134 intf->bmc = &intf->tmp_bmc;
3140 list_del(&intf->bmc_link);
3142 intf->bmc = &intf->tmp_bmc;
3148 send_guid_cmd(struct ipmi_smi *intf, int chan)
3162 intf,
3166 intf,
3170 intf->addrinfo[0].address,
3171 intf->addrinfo[0].lun,
3175 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3177 struct bmc_device *bmc = intf->bmc;
3193 dev_warn(intf->si_dev,
3207 wake_up(&intf->waitq);
3210 static void __get_guid(struct ipmi_smi *intf)
3213 struct bmc_device *bmc = intf->bmc;
3216 intf->null_user_handler = guid_handler;
3217 rv = send_guid_cmd(intf, 0);
3222 wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3227 intf->null_user_handler = NULL;
3231 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3247 intf,
3251 intf,
3255 intf->addrinfo[0].address,
3256 intf->addrinfo[0].lun,
3261 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3265 unsigned int set = intf->curr_working_cset;
3281 intf->wchannels[set].c[0].medium
3283 intf->wchannels[set].c[0].protocol
3286 intf->channel_list = intf->wchannels + set;
3287 intf->channels_ready = true;
3288 wake_up(&intf->waitq);
3297 ch = intf->curr_channel;
3298 chans = intf->wchannels[set].c;
3303 intf->curr_channel++;
3304 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3305 intf->channel_list = intf->wchannels + set;
3306 intf->channels_ready = true;
3307 wake_up(&intf->waitq);
3309 intf->channel_list = intf->wchannels + set;
3310 intf->channels_ready = true;
3311 rv = send_channel_info_cmd(intf, intf->curr_channel);
3316 dev_warn(intf->si_dev,
3318 intf->curr_channel, rv);
3320 intf->channel_list = intf->wchannels + set;
3321 intf->channels_ready = true;
3322 wake_up(&intf->waitq);
3330 * Must be holding intf->bmc_reg_mutex to call this.
3332 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3345 set = !intf->curr_working_cset;
3346 intf->curr_working_cset = set;
3347 memset(&intf->wchannels[set], 0,
3350 intf->null_user_handler = channel_handler;
3351 intf->curr_channel = 0;
3352 rv = send_channel_info_cmd(intf, 0);
3354 dev_warn(intf->si_dev,
3357 intf->null_user_handler = NULL;
3362 wait_event(intf->waitq, intf->channels_ready);
3363 intf->null_user_handler = NULL;
3365 unsigned int set = intf->curr_working_cset;
3368 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3369 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3370 intf->channel_list = intf->wchannels + set;
3371 intf->channels_ready = true;
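
__scan_channels() fills the idle half of a two-copy channel array (curr_working_cset selects which) and publishes it with a single pointer store to channel_list; readers elsewhere in this listing pick it up via READ_ONCE(intf->channel_list). The same double-buffer publish, minus the kernel memory-ordering helpers:

    #include <stdio.h>

    #define MAX_CHANNELS 16

    struct chan { int medium, protocol; };
    struct chan_set { struct chan c[MAX_CHANNELS]; };

    static struct chan_set wchannels[2];
    static struct chan_set *channel_list = &wchannels[0];
    static int curr_working_cset;

    static void rescan(void)
    {
        int set = !curr_working_cset;           /* build in the idle copy */
        curr_working_cset = set;

        wchannels[set].c[0].medium = 1;         /* fake scan results */
        wchannels[set].c[0].protocol = 1;

        channel_list = &wchannels[set];         /* publish in one store */
    }

    int main(void)
    {
        rescan();
        printf("chan 0 medium %d\n", channel_list->c[0].medium);
        return 0;
    }
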
3377 static void ipmi_poll(struct ipmi_smi *intf)
3379 if (intf->handlers->poll)
3380 intf->handlers->poll(intf->send_info);
3382 handle_new_recv_msgs(intf);
3387 ipmi_poll(user->intf);
3393 struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3396 if (!intf->in_shutdown)
3397 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3399 kref_put(&intf->refcount, intf_free);
3410 struct ipmi_smi *intf, *tintf;
3422 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3423 if (!intf)
3426 rv = init_srcu_struct(&intf->users_srcu);
3428 kfree(intf);
3432 intf->owner = owner;
3433 intf->bmc = &intf->tmp_bmc;
3434 INIT_LIST_HEAD(&intf->bmc->intfs);
3435 mutex_init(&intf->bmc->dyn_mutex);
3436 INIT_LIST_HEAD(&intf->bmc_link);
3437 mutex_init(&intf->bmc_reg_mutex);
3438 intf->intf_num = -1; /* Mark it invalid for now. */
3439 kref_init(&intf->refcount);
3440 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3441 intf->si_dev = si_dev;
3443 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3444 intf->addrinfo[j].lun = 2;
3447 intf->addrinfo[0].address = slave_addr;
3448 INIT_LIST_HEAD(&intf->users);
3449 intf->handlers = handlers;
3450 intf->send_info = send_info;
3451 spin_lock_init(&intf->seq_lock);
3453 intf->seq_table[j].inuse = 0;
3454 intf->seq_table[j].seqid = 0;
3456 intf->curr_seq = 0;
3457 spin_lock_init(&intf->waiting_rcv_msgs_lock);
3458 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3459 tasklet_setup(&intf->recv_tasklet,
3461 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3462 spin_lock_init(&intf->xmit_msgs_lock);
3463 INIT_LIST_HEAD(&intf->xmit_msgs);
3464 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3465 spin_lock_init(&intf->events_lock);
3466 spin_lock_init(&intf->watch_lock);
3467 atomic_set(&intf->event_waiters, 0);
3468 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3469 INIT_LIST_HEAD(&intf->waiting_events);
3470 intf->waiting_events_count = 0;
3471 mutex_init(&intf->cmd_rcvrs_mutex);
3472 spin_lock_init(&intf->maintenance_mode_lock);
3473 INIT_LIST_HEAD(&intf->cmd_rcvrs);
3474 init_waitqueue_head(&intf->waitq);
3476 atomic_set(&intf->stats[i], 0);
3492 list_add_rcu(&intf->link, &ipmi_interfaces);
3494 list_add_tail_rcu(&intf->link, link);
3496 rv = handlers->start_processing(send_info, intf);
3500 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3506 mutex_lock(&intf->bmc_reg_mutex);
3507 rv = __scan_channels(intf, &id);
3508 mutex_unlock(&intf->bmc_reg_mutex);
3518 intf->intf_num = i;
3522 call_smi_watchers(i, intf->si_dev);
3527 ipmi_bmc_unregister(intf);
3529 if (intf->handlers->shutdown)
3530 intf->handlers->shutdown(intf->send_info);
3532 list_del_rcu(&intf->link);
3535 cleanup_srcu_struct(&intf->users_srcu);
3536 kref_put(&intf->refcount, intf_free);
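
The tail of the registration path above (ipmi_bmc_unregister, shutdown, list_del_rcu, cleanup_srcu_struct, kref_put) is the usual kernel unwind ladder: tear down in reverse order of setup, falling through the labels. The same idiom in miniature, with hypothetical names:

    #include <stdlib.h>

    struct thing { int *a, *b; };

    static struct thing *thing_create(void)
    {
        struct thing *t = calloc(1, sizeof(*t));

        if (!t)
            return NULL;
        t->a = malloc(16);
        if (!t->a)
            goto out_free_t;
        t->b = malloc(16);
        if (!t->b)
            goto out_free_a;
        return t;

    out_free_a:                 /* unwind in reverse order of setup */
        free(t->a);
    out_free_t:
        free(t);
        return NULL;
    }

    int main(void)
    {
        struct thing *t = thing_create();

        if (t) {
            free(t->b);
            free(t->a);
            free(t);
        }
        return 0;
    }
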
3542 static void deliver_smi_err_response(struct ipmi_smi *intf,
3553 rv = handle_one_recv_msg(intf, msg);
3558 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3568 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3569 list_splice_tail(&intf->xmit_msgs, &tmplist);
3572 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3587 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3591 ent = &intf->seq_table[i];
3594 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3598 void ipmi_unregister_smi(struct ipmi_smi *intf)
3601 int intf_num = intf->intf_num, index;
3604 intf->intf_num = -1;
3605 intf->in_shutdown = true;
3606 list_del_rcu(&intf->link);
3621 index = srcu_read_lock(&intf->users_srcu);
3622 while (!list_empty(&intf->users)) {
3624 container_of(list_next_rcu(&intf->users),
3629 srcu_read_unlock(&intf->users_srcu, index);
3631 if (intf->handlers->shutdown)
3632 intf->handlers->shutdown(intf->send_info);
3634 cleanup_smi_msgs(intf);
3636 ipmi_bmc_unregister(intf);
3638 cleanup_srcu_struct(&intf->users_srcu);
3639 kref_put(&intf->refcount, intf_free);
3643 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3655 ipmi_inc_stat(intf, invalid_ipmb_responses);
3673 if (intf_find_seq(intf,
3684 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3698 if (deliver_response(intf, recv_msg))
3699 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3701 ipmi_inc_stat(intf, handled_ipmb_responses);
3706 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3720 ipmi_inc_stat(intf, invalid_commands);
3734 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3744 ipmi_inc_stat(intf, unhandled_commands);
3752 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3763 if (!intf->in_shutdown) {
3764 smi_send(intf, intf->handlers, msg, 0);
3809 if (deliver_response(intf, recv_msg))
3810 ipmi_inc_stat(intf, unhandled_commands);
3812 ipmi_inc_stat(intf, handled_commands);
3819 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3832 ipmi_inc_stat(intf, invalid_lan_responses);
3853 if (intf_find_seq(intf,
3864 ipmi_inc_stat(intf, unhandled_lan_responses);
3878 if (deliver_response(intf, recv_msg))
3879 ipmi_inc_stat(intf, unhandled_lan_responses);
3881 ipmi_inc_stat(intf, handled_lan_responses);
3886 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3900 ipmi_inc_stat(intf, invalid_commands);
3914 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3924 ipmi_inc_stat(intf, unhandled_commands);
3969 if (deliver_response(intf, recv_msg))
3970 ipmi_inc_stat(intf, unhandled_commands);
3972 ipmi_inc_stat(intf, handled_commands);
3985 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4003 ipmi_inc_stat(intf, invalid_commands);
4021 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4031 ipmi_inc_stat(intf, unhandled_commands);
4076 if (deliver_response(intf, recv_msg))
4077 ipmi_inc_stat(intf, unhandled_commands);
4079 ipmi_inc_stat(intf, handled_commands);
4104 static int handle_read_event_rsp(struct ipmi_smi *intf,
4115 ipmi_inc_stat(intf, invalid_events);
4126 spin_lock_irqsave(&intf->events_lock, flags);
4128 ipmi_inc_stat(intf, events);
4134 index = srcu_read_lock(&intf->users_srcu);
4135 list_for_each_entry_rcu(user, &intf->users, link) {
4163 srcu_read_unlock(&intf->users_srcu, index);
4169 deliver_local_response(intf, recv_msg);
4171 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4188 list_add_tail(&recv_msg->link, &intf->waiting_events);
4189 intf->waiting_events_count++;
4190 } else if (!intf->event_msg_printed) {
4195 dev_warn(intf->si_dev,
4197 intf->event_msg_printed = 1;
4201 spin_unlock_irqrestore(&intf->events_lock, flags);
4206 static int handle_bmc_rsp(struct ipmi_smi *intf,
4214 dev_warn(intf->si_dev,
4231 deliver_local_response(intf, recv_msg);
4241 static int handle_one_recv_msg(struct ipmi_smi *intf,
4254 if (intf->in_shutdown)
4280 chans = READ_ONCE(intf->channel_list)->c;
4283 ipmi_inc_stat(intf, sent_lan_command_errs);
4285 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4286 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4289 intf_start_seq_timer(intf, msg->msgid);
4296 dev_warn(intf->si_dev,
4311 dev_warn(intf->si_dev,
4349 deliver_local_response(intf, recv_msg);
4368 if (!intf->channels_ready) {
4373 chans = READ_ONCE(intf->channel_list)->c;
4382 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4388 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4399 requeue = handle_lan_get_msg_rsp(intf, msg);
4405 requeue = handle_lan_get_msg_cmd(intf, msg);
4415 requeue = handle_oem_get_msg_cmd(intf, msg);
4428 requeue = handle_read_event_rsp(intf, msg);
4431 requeue = handle_bmc_rsp(intf, msg);
4441 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4446 int run_to_completion = intf->run_to_completion;
4450 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4451 while (!list_empty(&intf->waiting_rcv_msgs)) {
4452 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4456 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4458 rv = handle_one_recv_msg(intf, smi_msg);
4460 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4469 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4479 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4485 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4489 index = srcu_read_lock(&intf->users_srcu);
4490 list_for_each_entry_rcu(user, &intf->users, link) {
4495 srcu_read_unlock(&intf->users_srcu, index);
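
The receive loop above pops one message at a time, handles it with the lock dropped, and, when handle_one_recv_msg() indicates the message cannot be processed yet, puts it back at the head of waiting_rcv_msgs and stops, preserving message order. The deferral shape on a plain ring buffer:

    #include <stdbool.h>
    #include <stdio.h>

    #define QLEN 8
    static int q[QLEN];
    static int head, count;

    static void push_tail(int msg)
    {
        q[(head + count++) % QLEN] = msg;
    }

    static bool try_handle(int msg)
    {
        return msg != 42;               /* pretend 42 isn't ready yet */
    }

    static void process_queue(void)
    {
        while (count) {
            int msg = q[head];          /* pop the front */
            head = (head + 1) % QLEN;
            count--;

            if (!try_handle(msg)) {
                head = (head + QLEN - 1) % QLEN;
                q[head] = msg;          /* back at the HEAD... */
                count++;
                break;                  /* ...and stop: keep ordering */
            }
            printf("handled %d\n", msg);
        }
    }

    int main(void)
    {
        push_tail(1);
        push_tail(42);
        push_tail(3);
        process_queue();                /* handles 1, defers 42 and 3 */
        return 0;
    }
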
4502 struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4503 int run_to_completion = intf->run_to_completion;
4517 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4518 if (intf->curr_msg == NULL && !intf->in_shutdown) {
4522 if (!list_empty(&intf->hp_xmit_msgs))
4523 entry = intf->hp_xmit_msgs.next;
4524 else if (!list_empty(&intf->xmit_msgs))
4525 entry = intf->xmit_msgs.next;
4530 intf->curr_msg = newmsg;
4535 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4537 intf->handlers->sender(intf->send_info, newmsg);
4541 handle_new_recv_msgs(intf);
4545 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4549 int run_to_completion = intf->run_to_completion;
4556 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4557 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4559 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4563 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4568 if (msg == intf->curr_msg)
4569 intf->curr_msg = NULL;
4571 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4574 smi_recv_tasklet(&intf->recv_tasklet);
4576 tasklet_schedule(&intf->recv_tasklet);
4580 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4582 if (intf->in_shutdown)
4585 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4586 tasklet_schedule(&intf->recv_tasklet);
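
Watchdog pretimeouts use a saturating one-shot flag: the producer above does atomic_set(..., 1), and the tasklet claims it with atomic_add_unless(..., -1, 0), so any number of pretimeouts between deliveries collapses into a single one. A C11 sketch of that claim operation:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int pending;

    static void report_pretimeout(void)
    {
        atomic_store(&pending, 1);      /* repeats collapse into one */
    }

    /* Like the kernel's atomic_add_unless(v, -1, 0):
     * decrement and return true, unless the value is already 0. */
    static bool claim(void)
    {
        int v = atomic_load(&pending);

        while (v != 0)
            if (atomic_compare_exchange_weak(&pending, &v, v - 1))
                return true;
        return false;
    }

    int main(void)
    {
        report_pretimeout();
        report_pretimeout();
        printf("%d\n", claim());        /* 1: delivered once */
        printf("%d\n", claim());        /* 0: nothing left */
        return 0;
    }
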
4591 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4611 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4619 if (intf->in_shutdown)
4634 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4638 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4640 ipmi_inc_stat(intf, timed_out_lan_commands);
4642 ipmi_inc_stat(intf, timed_out_ipmb_commands);
4655 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4659 ipmi_inc_stat(intf,
4662 ipmi_inc_stat(intf,
4667 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4676 if (intf->handlers) {
4678 ipmi_inc_stat(intf,
4681 ipmi_inc_stat(intf,
4684 smi_send(intf, intf->handlers, smi_msg, 0);
4688 spin_lock_irqsave(&intf->seq_lock, *flags);
4692 static bool ipmi_timeout_handler(struct ipmi_smi *intf,
4701 if (!intf->bmc_registered) {
4702 kref_get(&intf->refcount);
4703 if (!schedule_work(&intf->bmc_reg_work)) {
4704 kref_put(&intf->refcount, intf_free);
4715 spin_lock_irqsave(&intf->seq_lock, flags);
4716 if (intf->ipmb_maintenance_mode_timeout) {
4717 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4718 intf->ipmb_maintenance_mode_timeout = 0;
4720 intf->ipmb_maintenance_mode_timeout -= timeout_period;
4723 check_msg_timeout(intf, &intf->seq_table[i],
4726 spin_unlock_irqrestore(&intf->seq_lock, flags);
4729 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4739 if (intf->auto_maintenance_timeout > 0) {
4740 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4741 if (intf->auto_maintenance_timeout > 0) {
4742 intf->auto_maintenance_timeout
4744 if (!intf->maintenance_mode
4745 && (intf->auto_maintenance_timeout <= 0)) {
4746 intf->maintenance_mode_enable = false;
4747 maintenance_mode_update(intf);
4750 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4754 tasklet_schedule(&intf->recv_tasklet);
4759 static void ipmi_request_event(struct ipmi_smi *intf)
4762 if (intf->maintenance_mode_enable)
4765 if (!intf->in_shutdown)
4766 intf->handlers->request_events(intf->send_info);
4775 struct ipmi_smi *intf;
4783 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4784 if (atomic_read(&intf->event_waiters)) {
4785 intf->ticks_to_req_ev--;
4786 if (intf->ticks_to_req_ev == 0) {
4787 ipmi_request_event(intf);
4788 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4793 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4801 static void need_waiter(struct ipmi_smi *intf)
4876 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4888 intf,
4892 intf,
4896 intf->addrinfo[0].address,
4897 intf->addrinfo[0].lun,
4901 else if (intf->handlers->flush_messages)
4902 intf->handlers->flush_messages(intf->send_info);
4905 ipmi_poll(intf);
4908 static void event_receiver_fetcher(struct ipmi_smi *intf,
4916 intf->event_receiver = msg->msg.data[1];
4917 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4921 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4931 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4932 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4936 static void send_panic_events(struct ipmi_smi *intf, char *str)
4976 ipmi_panic_request_and_wait(intf, &addr, &msg);
5002 intf->local_sel_device = 0;
5003 intf->local_event_generator = 0;
5004 intf->event_receiver = 0;
5011 intf->null_user_handler = device_id_fetcher;
5012 ipmi_panic_request_and_wait(intf, &addr, &msg);
5014 if (intf->local_event_generator) {
5020 intf->null_user_handler = event_receiver_fetcher;
5021 ipmi_panic_request_and_wait(intf, &addr, &msg);
5023 intf->null_user_handler = NULL;
5030 if (((intf->event_receiver & 1) == 0)
5031 && (intf->event_receiver != 0)
5032 && (intf->event_receiver != intf->addrinfo[0].address)) {
5040 ipmb->lun = intf->event_receiver_lun;
5041 ipmb->slave_addr = intf->event_receiver;
5042 } else if (intf->local_sel_device) {
5069 data[3] = intf->addrinfo[0].address;
5078 ipmi_panic_request_and_wait(intf, &addr, &msg);
5088 struct ipmi_smi *intf;
5096 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5097 if (!intf->handlers || intf->intf_num == -1)
5101 if (!intf->handlers->poll)
5110 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5111 INIT_LIST_HEAD(&intf->xmit_msgs);
5112 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5114 spin_unlock(&intf->xmit_msgs_lock);
5116 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5117 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5119 spin_unlock(&intf->waiting_rcv_msgs_lock);
5121 intf->run_to_completion = 1;
5122 if (intf->handlers->set_run_to_completion)
5123 intf->handlers->set_run_to_completion(intf->send_info,
5126 list_for_each_entry_rcu(user, &intf->users, link) {
5132 send_panic_events(intf, ptr);
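
The panic path at the end cannot sleep or spin: if xmit_msgs_lock cannot be taken (its holder may be a CPU that was just stopped), the code reinitializes the protected lists in place rather than deadlocking, then forces run_to_completion so everything is polled. The trylock-or-reset move in userspace form (pthreads; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
    static int xmit_queue_len;          /* stands in for the msg lists */

    static void panic_flush(void)
    {
        if (pthread_mutex_trylock(&xmit_lock) != 0) {
            /* Holder died mid-update: the structure may be torn,
             * so reinitialize it instead of waiting forever. */
            xmit_queue_len = 0;
        } else {
            /* Lock was free, state is consistent: leave it alone. */
            pthread_mutex_unlock(&xmit_lock);
        }
    }

    int main(void)
    {
        panic_flush();
        printf("queue len %d\n", xmit_queue_len);
        return 0;
    }
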