Lines matching refs:intf — references to the identifier intf in the Linux IPMI message handler, drivers/char/ipmi/ipmi_msghandler.c; the number prefixed to each line below is its line number in that source file.

45 static void handle_new_recv_msgs(struct ipmi_smi *intf);
46 static void need_waiter(struct ipmi_smi *intf);
47 static int handle_one_recv_msg(struct ipmi_smi *intf,
197 struct ipmi_smi *intf;
330 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
582 void (*null_user_handler)(struct ipmi_smi *intf,
609 static void __get_guid(struct ipmi_smi *intf);
610 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
611 static int __ipmi_bmc_register(struct ipmi_smi *intf,
614 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
643 #define ipmi_inc_stat(intf, stat) \
644 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
645 #define ipmi_get_stat(intf, stat) \
646 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
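
The ipmi_inc_stat()/ipmi_get_stat() macros work by token pasting: the caller names a statistic by its short form and IPMI_STAT_ ## stat expands to the full enumerator, so a misspelled statistic fails at compile time instead of indexing garbage. A minimal userspace sketch of the same pattern with C11 atomics (the enum and names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { STAT_sent_commands, STAT_handled_responses, STAT_NUM };

    struct smi {
        atomic_uint stats[STAT_NUM];
    };

    /* inc_stat(s, sent_commands) expands to
     * atomic_fetch_add(&(s)->stats[STAT_sent_commands], 1). */
    #define inc_stat(s, stat)  atomic_fetch_add(&(s)->stats[STAT_ ## stat], 1)
    #define get_stat(s, stat)  atomic_load(&(s)->stats[STAT_ ## stat])

    int main(void)
    {
        struct smi s = { 0 };
        inc_stat(&s, sent_commands);
        printf("sent_commands = %u\n", get_stat(&s, sent_commands));
        return 0;
    }
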
701 static void clean_up_interface_data(struct ipmi_smi *intf)
707 tasklet_kill(&intf->recv_tasklet);
709 free_smi_msg_list(&intf->waiting_rcv_msgs);
710 free_recv_msg_list(&intf->waiting_events);
716 mutex_lock(&intf->cmd_rcvrs_mutex);
718 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
719 mutex_unlock(&intf->cmd_rcvrs_mutex);
725 if ((intf->seq_table[i].inuse)
726 && (intf->seq_table[i].recv_msg))
727 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
733 struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
735 clean_up_interface_data(intf);
736 kfree(intf);
741 struct ipmi_smi *intf;
757 list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
759 int intf_num = READ_ONCE(intf->intf_num);
763 watcher->new_smi(intf_num, intf->si_dev);
922 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
928 if (intf->null_user_handler) {
929 intf->null_user_handler(intf, msg);
961 static void deliver_local_response(struct ipmi_smi *intf,
964 if (deliver_response(intf, msg))
965 ipmi_inc_stat(intf, unhandled_local_responses);
967 ipmi_inc_stat(intf, handled_local_responses);
970 static void deliver_err_response(struct ipmi_smi *intf,
978 deliver_local_response(intf, msg);
981 static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
985 if (!intf->handlers->set_need_watch)
988 spin_lock_irqsave(&intf->watch_lock, iflags);
990 intf->response_waiters++;
993 intf->watchdog_waiters++;
996 intf->command_waiters++;
998 if ((intf->last_watch_mask & flags) != flags) {
999 intf->last_watch_mask |= flags;
1000 intf->handlers->set_need_watch(intf->send_info,
1001 intf->last_watch_mask);
1003 spin_unlock_irqrestore(&intf->watch_lock, iflags);
1006 static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
1010 if (!intf->handlers->set_need_watch)
1013 spin_lock_irqsave(&intf->watch_lock, iflags);
1015 intf->response_waiters--;
1018 intf->watchdog_waiters--;
1021 intf->command_waiters--;
1024 if (intf->response_waiters)
1026 if (intf->watchdog_waiters)
1028 if (intf->command_waiters)
1031 if (intf->last_watch_mask != flags) {
1032 intf->last_watch_mask = flags;
1033 intf->handlers->set_need_watch(intf->send_info,
1034 intf->last_watch_mask);
1036 spin_unlock_irqrestore(&intf->watch_lock, iflags);
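
smi_add_watch()/smi_remove_watch() keep one counter per class of waiter (responses, watchdog, commands) and only call the driver's set_need_watch() hook when the combined mask actually changes. A simplified single-threaded sketch of that reference-counted mask (the kernel holds watch_lock around it, and has three flag classes where this sketch uses two):

    #include <stdio.h>

    #define WATCH_MESSAGES  (1u << 0)
    #define WATCH_WATCHDOG  (1u << 1)

    struct smi {
        unsigned int msg_waiters, wdog_waiters;
        unsigned int last_mask;
    };

    static void set_need_watch(unsigned int mask)   /* stand-in for the driver hook */
    {
        printf("driver watch mask -> 0x%x\n", mask);
    }

    static void add_watch(struct smi *s, unsigned int flags)
    {
        if (flags & WATCH_MESSAGES) s->msg_waiters++;
        if (flags & WATCH_WATCHDOG) s->wdog_waiters++;
        if ((s->last_mask & flags) != flags) {      /* only notify on change */
            s->last_mask |= flags;
            set_need_watch(s->last_mask);
        }
    }

    static void remove_watch(struct smi *s, unsigned int flags)
    {
        unsigned int mask = 0;
        if (flags & WATCH_MESSAGES) s->msg_waiters--;
        if (flags & WATCH_WATCHDOG) s->wdog_waiters--;
        /* Recompute the mask from the counters that are still nonzero. */
        if (s->msg_waiters)  mask |= WATCH_MESSAGES;
        if (s->wdog_waiters) mask |= WATCH_WATCHDOG;
        if (s->last_mask != mask) {
            s->last_mask = mask;
            set_need_watch(s->last_mask);
        }
    }

    int main(void)
    {
        struct smi s = { 0 };
        add_watch(&s, WATCH_MESSAGES);    /* mask -> 0x1 */
        add_watch(&s, WATCH_MESSAGES);    /* no driver call: mask unchanged */
        remove_watch(&s, WATCH_MESSAGES); /* one waiter left: mask unchanged */
        remove_watch(&s, WATCH_MESSAGES); /* mask -> 0x0 */
        return 0;
    }
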
1044 static int intf_next_seq(struct ipmi_smi *intf,
1060 for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1062 if (!intf->seq_table[i].inuse)
1066 if (!intf->seq_table[i].inuse) {
1067 intf->seq_table[i].recv_msg = recv_msg;
1073 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1074 intf->seq_table[i].orig_timeout = timeout;
1075 intf->seq_table[i].retries_left = retries;
1076 intf->seq_table[i].broadcast = broadcast;
1077 intf->seq_table[i].inuse = 1;
1078 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1080 *seqid = intf->seq_table[i].seqid;
1081 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1082 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1083 need_waiter(intf);
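
intf_next_seq() scans the sequence table circularly starting at curr_seq, claims the first free slot, and advances the slot's generation counter (seqid) so a late response for an old occupant of the slot cannot be mistaken for the new one. A standalone sketch of the circular scan; the table size matches the kernel's 64-entry IPMB table, but the seqid width here is illustrative:

    #include <stdio.h>

    #define NUM_SEQ   64
    #define SEQID_MAX 0x3fff                    /* illustrative wrap point */
    #define NEXT_SEQID(id) (((id) + 1) & SEQID_MAX)

    struct seq_ent { int inuse; unsigned int seqid; };

    static struct seq_ent seq_table[NUM_SEQ];
    static int curr_seq;

    /* Returns the claimed slot index and its generation, or -1 if full. */
    static int next_seq(unsigned int *seqid)
    {
        int i;

        /* Walk at most one full lap starting at curr_seq; the slot just
         * before curr_seq is examined by the check after the loop. */
        for (i = curr_seq; (i + 1) % NUM_SEQ != curr_seq; i = (i + 1) % NUM_SEQ)
            if (!seq_table[i].inuse)
                break;

        if (seq_table[i].inuse)
            return -1;                          /* table full */

        seq_table[i].inuse = 1;
        seq_table[i].seqid = NEXT_SEQID(seq_table[i].seqid);
        *seqid = seq_table[i].seqid;
        curr_seq = (i + 1) % NUM_SEQ;
        return i;
    }

    int main(void)
    {
        unsigned int id;
        int slot = next_seq(&id);
        printf("slot %d, seqid %u\n", slot, id);
        return 0;
    }
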
1098 static int intf_find_seq(struct ipmi_smi *intf,
1112 spin_lock_irqsave(&intf->seq_lock, flags);
1113 if (intf->seq_table[seq].inuse) {
1114 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1120 intf->seq_table[seq].inuse = 0;
1121 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1125 spin_unlock_irqrestore(&intf->seq_lock, flags);
1132 static int intf_start_seq_timer(struct ipmi_smi *intf,
1143 spin_lock_irqsave(&intf->seq_lock, flags);
1148 if ((intf->seq_table[seq].inuse)
1149 && (intf->seq_table[seq].seqid == seqid)) {
1150 struct seq_table *ent = &intf->seq_table[seq];
1154 spin_unlock_irqrestore(&intf->seq_lock, flags);
1160 static int intf_err_seq(struct ipmi_smi *intf,
1173 spin_lock_irqsave(&intf->seq_lock, flags);
1178 if ((intf->seq_table[seq].inuse)
1179 && (intf->seq_table[seq].seqid == seqid)) {
1180 struct seq_table *ent = &intf->seq_table[seq];
1183 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1187 spin_unlock_irqrestore(&intf->seq_lock, flags);
1190 deliver_err_response(intf, msg, err);
1212 struct ipmi_smi *intf;
1238 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1239 if (intf->intf_num == if_num)
1247 if (atomic_add_return(1, &intf->nr_users) > max_users) {
1258 if (!try_module_get(intf->owner)) {
1264 kref_get(&intf->refcount);
1270 new_user->intf = intf;
1274 spin_lock_irqsave(&intf->seq_lock, flags);
1275 list_add_rcu(&new_user->link, &intf->users);
1276 spin_unlock_irqrestore(&intf->seq_lock, flags);
1279 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1285 atomic_dec(&intf->nr_users);
1295 struct ipmi_smi *intf;
1298 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1299 if (intf->intf_num == if_num)
1308 if (!intf->handlers->get_smi_info)
1311 rv = intf->handlers->get_smi_info(intf->send_info, data);
1328 struct ipmi_smi *intf = user->intf;
1353 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1356 atomic_dec(&intf->event_waiters);
1359 spin_lock_irqsave(&intf->seq_lock, flags);
1361 atomic_dec(&intf->nr_users);
1364 if (intf->seq_table[i].inuse
1365 && (intf->seq_table[i].recv_msg->user == user)) {
1366 intf->seq_table[i].inuse = 0;
1367 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1368 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1371 spin_unlock_irqrestore(&intf->seq_lock, flags);
1379 mutex_lock(&intf->cmd_rcvrs_mutex);
1380 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1381 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1388 mutex_unlock(&intf->cmd_rcvrs_mutex);
1396 owner = intf->owner;
1397 kref_put(&intf->refcount, intf_free);
1422 rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1447 user->intf->addrinfo[channel].address = address;
1469 *address = user->intf->addrinfo[channel].address;
1491 user->intf->addrinfo[channel].lun = LUN & 0x3;
1513 *address = user->intf->addrinfo[channel].lun;
1530 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1531 mode = user->intf->maintenance_mode;
1532 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1539 static void maintenance_mode_update(struct ipmi_smi *intf)
1541 if (intf->handlers->set_maintenance_mode)
1542 intf->handlers->set_maintenance_mode(
1543 intf->send_info, intf->maintenance_mode_enable);
1550 struct ipmi_smi *intf = user->intf;
1556 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1557 if (intf->maintenance_mode != mode) {
1560 intf->maintenance_mode_enable
1561 = (intf->auto_maintenance_timeout > 0);
1565 intf->maintenance_mode_enable = false;
1569 intf->maintenance_mode_enable = true;
1576 intf->maintenance_mode = mode;
1578 maintenance_mode_update(intf);
1581 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1591 struct ipmi_smi *intf = user->intf;
1602 spin_lock_irqsave(&intf->events_lock, flags);
1609 if (atomic_inc_return(&intf->event_waiters) == 1)
1610 need_waiter(intf);
1612 atomic_dec(&intf->event_waiters);
1615 if (intf->delivering_events)
1623 while (user->gets_events && !list_empty(&intf->waiting_events)) {
1624 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1626 intf->waiting_events_count = 0;
1627 if (intf->event_msg_printed) {
1628 dev_warn(intf->si_dev, "Event queue no longer full\n");
1629 intf->event_msg_printed = 0;
1632 intf->delivering_events = 1;
1633 spin_unlock_irqrestore(&intf->events_lock, flags);
1638 deliver_local_response(intf, msg);
1641 spin_lock_irqsave(&intf->events_lock, flags);
1642 intf->delivering_events = 0;
1646 spin_unlock_irqrestore(&intf->events_lock, flags);
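
ipmi_set_gets_events() must not invoke user callbacks while holding events_lock, so it detaches the queued events, sets delivering_events as a re-entrancy guard, and drops the lock for the actual delivery. A userspace analogue with a pthread mutex (queue layout simplified to a singly linked list):

    #include <pthread.h>
    #include <stdio.h>

    struct msg { struct msg *next; int id; };

    static pthread_mutex_t events_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct msg *waiting_events;
    static int delivering_events;

    static void deliver(struct msg *m)     /* user callback, runs unlocked */
    {
        printf("event %d\n", m->id);
    }

    static void flush_events(void)
    {
        pthread_mutex_lock(&events_lock);
        if (delivering_events) {           /* another caller is flushing */
            pthread_mutex_unlock(&events_lock);
            return;
        }
        while (waiting_events) {
            /* Detach the whole queue, then deliver without the lock so a
             * callback may queue new events or take the lock safely. */
            struct msg *list = waiting_events;
            waiting_events = NULL;
            delivering_events = 1;
            pthread_mutex_unlock(&events_lock);

            while (list) {
                struct msg *m = list;
                list = m->next;
                deliver(m);
            }

            pthread_mutex_lock(&events_lock);
            delivering_events = 0;
        }
        pthread_mutex_unlock(&events_lock);
    }

    int main(void)
    {
        static struct msg a = { 0, 1 }, b = { 0, 2 };
        a.next = &b;
        waiting_events = &a;
        flush_events();                    /* prints events 1 and 2 */
        return 0;
    }
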
1653 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1660 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1661 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1669 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1676 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1677 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1690 struct ipmi_smi *intf = user->intf;
1708 mutex_lock(&intf->cmd_rcvrs_mutex);
1710 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1715 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1717 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1720 mutex_unlock(&intf->cmd_rcvrs_mutex);
1735 struct ipmi_smi *intf = user->intf;
1744 mutex_lock(&intf->cmd_rcvrs_mutex);
1748 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1761 mutex_unlock(&intf->cmd_rcvrs_mutex);
1765 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
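
Command receivers are exclusive per (netfn, cmd, channel): is_cmd_rcvr_exclusive() rejects a registration whose channel bitmask overlaps an existing receiver for the same command. A sketch of that overlap check over a plain array (the kernel walks an RCU-protected list under cmd_rcvrs_mutex):

    #include <stdbool.h>
    #include <stdio.h>

    struct cmd_rcvr { unsigned char netfn, cmd; unsigned int chans; };

    /* Reject the registration if any requested channel bit is claimed. */
    static bool chans_available(const struct cmd_rcvr *tab, int n,
                                unsigned char netfn, unsigned char cmd,
                                unsigned int chans)
    {
        for (int i = 0; i < n; i++)
            if (tab[i].netfn == netfn && tab[i].cmd == cmd &&
                (tab[i].chans & chans))
                return false;
        return true;
    }

    int main(void)
    {
        struct cmd_rcvr tab[] = { { 0x06, 0x01, 0x0003 } };  /* chans 0, 1 */
        printf("%d\n", chans_available(tab, 1, 0x06, 0x01, 0x0004)); /* 1 */
        printf("%d\n", chans_available(tab, 1, 0x06, 0x01, 0x0002)); /* 0 */
        return 0;
    }
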
1866 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1870 if (intf->curr_msg) {
1872 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1874 list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1877 intf->curr_msg = smi_msg;
1883 static void smi_send(struct ipmi_smi *intf,
1887 int run_to_completion = intf->run_to_completion;
1891 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1892 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1895 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1898 handlers->sender(intf->send_info, smi_msg);
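
smi_add_send_msg() implements a two-priority transmit path: if the driver is already busy with curr_msg, the message is appended to the high-priority or normal list and sent later; otherwise it becomes curr_msg and smi_send() hands it to the driver immediately. A minimal sketch, with singly linked queues standing in for the kernel's list_head lists:

    #include <stdio.h>

    struct smi_msg { struct smi_msg *next; const char *name; };

    static struct smi_msg *curr_msg;       /* message the driver is working on */
    static struct smi_msg *xmit_q, *hp_q;  /* simple FIFOs; append walks to tail */

    static void q_push(struct smi_msg **q, struct smi_msg *m)
    {
        m->next = NULL;
        while (*q) q = &(*q)->next;
        *q = m;
    }

    /* Returns the message to hand to the driver now, or NULL if queued. */
    static struct smi_msg *add_send_msg(struct smi_msg *m, int priority)
    {
        if (curr_msg) {
            q_push(priority > 0 ? &hp_q : &xmit_q, m);
            return NULL;
        }
        curr_msg = m;
        return m;
    }

    int main(void)
    {
        struct smi_msg a = { 0, "a" }, b = { 0, "b" };
        if (add_send_msg(&a, 0)) printf("send %s\n", curr_msg->name);  /* now */
        if (!add_send_msg(&b, 1)) printf("queued %s (hp)\n", b.name);  /* later */
        return 0;
    }
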
1909 static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
1926 ipmi_inc_stat(intf, sent_invalid_commands);
1940 ipmi_inc_stat(intf, sent_invalid_commands);
1947 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1948 intf->auto_maintenance_timeout
1950 if (!intf->maintenance_mode
1951 && !intf->maintenance_mode_enable) {
1952 intf->maintenance_mode_enable = true;
1953 maintenance_mode_update(intf);
1955 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1960 ipmi_inc_stat(intf, sent_invalid_commands);
1971 ipmi_inc_stat(intf, sent_local_commands);
1976 static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
1995 ipmi_inc_stat(intf, sent_invalid_commands);
1999 chans = READ_ONCE(intf->channel_list)->c;
2002 ipmi_inc_stat(intf, sent_invalid_commands);
2022 ipmi_inc_stat(intf, sent_invalid_commands);
2028 ipmi_inc_stat(intf, sent_invalid_commands);
2039 ipmi_inc_stat(intf, sent_ipmb_responses);
2053 spin_lock_irqsave(&intf->seq_lock, flags);
2056 intf->ipmb_maintenance_mode_timeout =
2059 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2067 rv = intf_next_seq(intf,
2081 ipmi_inc_stat(intf, sent_ipmb_commands);
2111 spin_unlock_irqrestore(&intf->seq_lock, flags);
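
i_ipmi_req_ipmb() formats the outgoing IPMB frame, which carries two 2's-complement checksums (one over the connection header, one over the rest of the frame): each checksum byte is chosen so that the covered bytes sum to zero mod 256. A sketch of the checksum; the header bytes are illustrative:

    #include <stdio.h>

    /* IPMB 2's-complement checksum: covered bytes plus the checksum byte
     * itself must sum to zero (mod 256). */
    static unsigned char ipmb_checksum(const unsigned char *data, int size)
    {
        unsigned char csum = 0;
        for (; size > 0; size--, data++)
            csum += *data;
        return -csum;
    }

    int main(void)
    {
        /* Connection header: rsSA, then netFn<<2 | rsLUN. */
        unsigned char hdr[2] = { 0x20, 0x06 << 2 };
        unsigned char c = ipmb_checksum(hdr, 2);
        printf("checksum = 0x%02x, verify = 0x%02x\n",
               c, (unsigned char)(hdr[0] + hdr[1] + c));  /* verify is 0x00 */
        return 0;
    }
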
2117 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
2128 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
2133 ipmi_inc_stat(intf, sent_invalid_commands);
2138 ipmi_inc_stat(intf, sent_invalid_commands);
2144 ipmi_inc_stat(intf, sent_invalid_commands);
2169 static int i_ipmi_req_lan(struct ipmi_smi *intf,
2186 ipmi_inc_stat(intf, sent_invalid_commands);
2190 chans = READ_ONCE(intf->channel_list)->c;
2196 ipmi_inc_stat(intf, sent_invalid_commands);
2202 ipmi_inc_stat(intf, sent_invalid_commands);
2208 ipmi_inc_stat(intf, sent_invalid_commands);
2219 ipmi_inc_stat(intf, sent_lan_responses);
2232 spin_lock_irqsave(&intf->seq_lock, flags);
2238 rv = intf_next_seq(intf,
2252 ipmi_inc_stat(intf, sent_lan_commands);
2281 spin_unlock_irqrestore(&intf->seq_lock, flags);
2294 struct ipmi_smi *intf,
2343 if (intf->in_shutdown) {
2360 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2363 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2367 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
2370 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2374 ipmi_inc_stat(intf, sent_invalid_commands);
2383 dev_dbg(intf->si_dev, "Send: %*ph\n",
2386 smi_send(intf, intf->handlers, smi_msg, priority);
2396 static int check_addr(struct ipmi_smi *intf,
2404 *lun = intf->addrinfo[addr->channel].lun;
2405 *saddr = intf->addrinfo[addr->channel].address;
2428 rv = check_addr(user->intf, addr, &saddr, &lun);
2431 user->intf,
2467 rv = check_addr(user->intf, addr, &saddr, &lun);
2470 user->intf,
2487 static void bmc_device_id_handler(struct ipmi_smi *intf,
2495 dev_warn(intf->si_dev,
2502 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
2504 intf->bmc->dyn_id_set = 0;
2509 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2511 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2513 intf->bmc->cc = msg->msg.data[0];
2514 intf->bmc->dyn_id_set = 0;
2521 intf->bmc->dyn_id_set = 1;
2524 wake_up(&intf->waitq);
2528 send_get_device_id_cmd(struct ipmi_smi *intf)
2543 intf,
2547 intf,
2551 intf->addrinfo[0].address,
2552 intf->addrinfo[0].lun,
2556 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2561 intf->null_user_handler = bmc_device_id_handler;
2567 rv = send_get_device_id_cmd(intf);
2571 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2577 dev_warn(intf->si_dev,
2590 intf->null_user_handler = NULL;
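
__get_device_id() works without an ipmi_user: it installs null_user_handler so the response is routed to bmc_device_id_handler, then sleeps on intf->waitq until dyn_id_set leaves the in-progress value 2 (the handler sets 0 on failure, 1 on success). A userspace analogue of that wait_event()/wake_up() handshake using a condition variable:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
    static int dyn_id_set = 2;                /* 2 = fetch in progress */

    /* Response handler: runs in the receive context, publishes the result
     * and wakes the waiter (the kernel's wake_up(&intf->waitq)). */
    static void *recv_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        dyn_id_set = 1;                       /* 1 = fetched, 0 = failed */
        pthread_cond_broadcast(&waitq);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, recv_thread, NULL);

        /* wait_event(waitq, dyn_id_set != 2) analogue. */
        pthread_mutex_lock(&lock);
        while (dyn_id_set == 2)
            pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);

        printf("device id %s\n", dyn_id_set == 1 ? "ok" : "failed");
        pthread_join(t, NULL);
        return 0;
    }
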
2597 * bmc or intf, this code will get the other one. If the data has
2604 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2610 bool intf_set = intf != NULL;
2612 if (!intf) {
2619 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2621 kref_get(&intf->refcount);
2623 mutex_lock(&intf->bmc_reg_mutex);
2625 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2627 mutex_unlock(&intf->bmc_reg_mutex);
2628 kref_put(&intf->refcount, intf_free);
2632 mutex_lock(&intf->bmc_reg_mutex);
2633 bmc = intf->bmc;
2635 kref_get(&intf->refcount);
2639 if (intf->in_bmc_register ||
2644 __get_guid(intf);
2647 rv = __get_device_id(intf, bmc);
2655 if (!intf->bmc_registered
2670 __ipmi_bmc_unregister(intf);
2672 intf->bmc->id = id;
2673 intf->bmc->dyn_guid_set = guid_set;
2674 intf->bmc->guid = guid;
2675 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2676 need_waiter(intf); /* Retry later on an error. */
2678 __scan_channels(intf, &id);
2687 mutex_unlock(&intf->bmc_reg_mutex);
2693 bmc = intf->bmc;
2698 __scan_channels(intf, &bmc->fetch_id);
2731 mutex_unlock(&intf->bmc_reg_mutex);
2733 kref_put(&intf->refcount, intf_free);
2737 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2741 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
3073 * Must be called with intf->bmc_reg_mutex held.
3075 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3077 struct bmc_device *bmc = intf->bmc;
3079 if (!intf->bmc_registered)
3082 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3083 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3084 kfree(intf->my_dev_name);
3085 intf->my_dev_name = NULL;
3088 list_del(&intf->bmc_link);
3090 intf->bmc = &intf->tmp_bmc;
3092 intf->bmc_registered = false;
3095 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3097 mutex_lock(&intf->bmc_reg_mutex);
3098 __ipmi_bmc_unregister(intf);
3099 mutex_unlock(&intf->bmc_reg_mutex);
3103 * Must be called with intf->bmc_reg_mutex held.
3105 static int __ipmi_bmc_register(struct ipmi_smi *intf,
3119 intf->in_bmc_register = true;
3120 mutex_unlock(&intf->bmc_reg_mutex);
3144 intf->bmc = old_bmc;
3146 list_add_tail(&intf->bmc_link, &bmc->intfs);
3149 dev_info(intf->si_dev,
3184 intf->bmc = bmc;
3186 list_add_tail(&intf->bmc_link, &bmc->intfs);
3191 dev_err(intf->si_dev,
3197 dev_info(intf->si_dev,
3208 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3210 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3215 intf_num = intf->intf_num;
3216 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3217 if (!intf->my_dev_name) {
3219 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3224 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3225 intf->my_dev_name);
3227 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3232 intf->bmc_registered = true;
3236 mutex_lock(&intf->bmc_reg_mutex);
3237 intf->in_bmc_register = false;
3242 kfree(intf->my_dev_name);
3243 intf->my_dev_name = NULL;
3246 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3250 list_del(&intf->bmc_link);
3252 intf->bmc = &intf->tmp_bmc;
3258 list_del(&intf->bmc_link);
3260 intf->bmc = &intf->tmp_bmc;
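
__ipmi_bmc_register() unwinds partial failures with the usual kernel goto ladder: each exit label undoes exactly the steps that had already succeeded (remove the symlinks, free my_dev_name, drop the bmc link, fall back to tmp_bmc). A generic sketch of the idiom with stand-in steps:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-ins for registration steps; each can fail. */
    static int  step_ok(const char *what) { printf("do %s\n", what); return 0; }
    static void undo(const char *what)    { printf("undo %s\n", what); }

    static int register_all(void)
    {
        char *name;
        int rv;

        rv = step_ok("create device");
        if (rv)
            goto out;

        name = strdup("ipmi0");
        if (!name) {
            rv = -1;
            goto out_device;               /* unwind only completed steps */
        }

        rv = step_ok("create symlink");
        if (rv)
            goto out_name;

        return 0;    /* fully registered; name stays owned by the object */

    out_name:
        free(name);
    out_device:
        undo("create device");
    out:
        return rv;
    }

    int main(void) { return register_all(); }
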
3266 send_guid_cmd(struct ipmi_smi *intf, int chan)
3280 intf,
3284 intf,
3288 intf->addrinfo[0].address,
3289 intf->addrinfo[0].lun,
3293 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3295 struct bmc_device *bmc = intf->bmc;
3311 dev_warn(intf->si_dev,
3325 wake_up(&intf->waitq);
3328 static void __get_guid(struct ipmi_smi *intf)
3331 struct bmc_device *bmc = intf->bmc;
3334 intf->null_user_handler = guid_handler;
3335 rv = send_guid_cmd(intf, 0);
3340 wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3345 intf->null_user_handler = NULL;
3349 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3365 intf,
3369 intf,
3373 intf->addrinfo[0].address,
3374 intf->addrinfo[0].lun,
3379 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3383 unsigned int set = intf->curr_working_cset;
3399 intf->wchannels[set].c[0].medium
3401 intf->wchannels[set].c[0].protocol
3404 intf->channel_list = intf->wchannels + set;
3405 intf->channels_ready = true;
3406 wake_up(&intf->waitq);
3415 ch = intf->curr_channel;
3416 chans = intf->wchannels[set].c;
3421 intf->curr_channel++;
3422 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3423 intf->channel_list = intf->wchannels + set;
3424 intf->channels_ready = true;
3425 wake_up(&intf->waitq);
3427 intf->channel_list = intf->wchannels + set;
3428 intf->channels_ready = true;
3429 rv = send_channel_info_cmd(intf, intf->curr_channel);
3434 dev_warn(intf->si_dev,
3436 intf->curr_channel, rv);
3438 intf->channel_list = intf->wchannels + set;
3439 intf->channels_ready = true;
3440 wake_up(&intf->waitq);
3448 * Must be holding intf->bmc_reg_mutex to call this.
3450 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3463 set = !intf->curr_working_cset;
3464 intf->curr_working_cset = set;
3465 memset(&intf->wchannels[set], 0,
3468 intf->null_user_handler = channel_handler;
3469 intf->curr_channel = 0;
3470 rv = send_channel_info_cmd(intf, 0);
3472 dev_warn(intf->si_dev,
3475 intf->null_user_handler = NULL;
3480 wait_event(intf->waitq, intf->channels_ready);
3481 intf->null_user_handler = NULL;
3483 unsigned int set = intf->curr_working_cset;
3486 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3487 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3488 intf->channel_list = intf->wchannels + set;
3489 intf->channels_ready = true;
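
__scan_channels() double-buffers the channel table: it rebuilds the inactive half of wchannels[2] (set = !curr_working_cset), and the completion handler publishes the result by pointing channel_list at the finished copy, which readers fetch with READ_ONCE(). A C11-atomics sketch of that single-pointer publish:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_CHANNELS 16

    struct channel_set { int medium[MAX_CHANNELS]; };

    static struct channel_set wchannels[2];           /* double buffer */
    static int curr_working_cset;                     /* half being rebuilt */
    static _Atomic(struct channel_set *) channel_list;

    /* Rebuild channel data in the inactive half, then publish it with a
     * single atomic pointer store; readers never see a half-written set. */
    static void rescan_channels(void)
    {
        int set = !curr_working_cset;
        curr_working_cset = set;
        memset(&wchannels[set], 0, sizeof(wchannels[set]));
        wchannels[set].medium[0] = 1;                 /* scan results go here */
        atomic_store(&channel_list, &wchannels[set]);
    }

    int main(void)
    {
        rescan_channels();
        struct channel_set *c = atomic_load(&channel_list); /* READ_ONCE analogue */
        printf("channel 0 medium = %d\n", c->medium[0]);
        return 0;
    }
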
3495 static void ipmi_poll(struct ipmi_smi *intf)
3497 if (intf->handlers->poll)
3498 intf->handlers->poll(intf->send_info);
3500 handle_new_recv_msgs(intf);
3505 ipmi_poll(user->intf);
3513 struct ipmi_smi *intf = container_of(attr,
3516 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
3524 struct ipmi_smi *intf = container_of(attr,
3530 index = srcu_read_lock(&intf->users_srcu);
3531 list_for_each_entry_rcu(user, &intf->users, link)
3533 srcu_read_unlock(&intf->users_srcu, index);
3541 struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3544 if (!intf->in_shutdown)
3545 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3547 kref_put(&intf->refcount, intf_free);
3558 struct ipmi_smi *intf, *tintf;
3570 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3571 if (!intf)
3574 rv = init_srcu_struct(&intf->users_srcu);
3576 kfree(intf);
3580 intf->owner = owner;
3581 intf->bmc = &intf->tmp_bmc;
3582 INIT_LIST_HEAD(&intf->bmc->intfs);
3583 mutex_init(&intf->bmc->dyn_mutex);
3584 INIT_LIST_HEAD(&intf->bmc_link);
3585 mutex_init(&intf->bmc_reg_mutex);
3586 intf->intf_num = -1; /* Mark it invalid for now. */
3587 kref_init(&intf->refcount);
3588 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3589 intf->si_dev = si_dev;
3591 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3592 intf->addrinfo[j].lun = 2;
3595 intf->addrinfo[0].address = slave_addr;
3596 INIT_LIST_HEAD(&intf->users);
3597 atomic_set(&intf->nr_users, 0);
3598 intf->handlers = handlers;
3599 intf->send_info = send_info;
3600 spin_lock_init(&intf->seq_lock);
3602 intf->seq_table[j].inuse = 0;
3603 intf->seq_table[j].seqid = 0;
3605 intf->curr_seq = 0;
3606 spin_lock_init(&intf->waiting_rcv_msgs_lock);
3607 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3608 tasklet_setup(&intf->recv_tasklet,
3610 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3611 spin_lock_init(&intf->xmit_msgs_lock);
3612 INIT_LIST_HEAD(&intf->xmit_msgs);
3613 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3614 spin_lock_init(&intf->events_lock);
3615 spin_lock_init(&intf->watch_lock);
3616 atomic_set(&intf->event_waiters, 0);
3617 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3618 INIT_LIST_HEAD(&intf->waiting_events);
3619 intf->waiting_events_count = 0;
3620 mutex_init(&intf->cmd_rcvrs_mutex);
3621 spin_lock_init(&intf->maintenance_mode_lock);
3622 INIT_LIST_HEAD(&intf->cmd_rcvrs);
3623 init_waitqueue_head(&intf->waitq);
3625 atomic_set(&intf->stats[i], 0);
3641 list_add_rcu(&intf->link, &ipmi_interfaces);
3643 list_add_tail_rcu(&intf->link, link);
3645 rv = handlers->start_processing(send_info, intf);
3649 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3655 mutex_lock(&intf->bmc_reg_mutex);
3656 rv = __scan_channels(intf, &id);
3657 mutex_unlock(&intf->bmc_reg_mutex);
3661 intf->nr_users_devattr = dev_attr_nr_users;
3662 sysfs_attr_init(&intf->nr_users_devattr.attr);
3663 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
3667 intf->nr_msgs_devattr = dev_attr_nr_msgs;
3668 sysfs_attr_init(&intf->nr_msgs_devattr.attr);
3669 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
3671 device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3681 intf->intf_num = i;
3685 call_smi_watchers(i, intf->si_dev);
3690 ipmi_bmc_unregister(intf);
3692 if (intf->handlers->shutdown)
3693 intf->handlers->shutdown(intf->send_info);
3695 list_del_rcu(&intf->link);
3698 cleanup_srcu_struct(&intf->users_srcu);
3699 kref_put(&intf->refcount, intf_free);
3705 static void deliver_smi_err_response(struct ipmi_smi *intf,
3716 rv = handle_one_recv_msg(intf, msg);
3721 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3731 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3732 list_splice_tail(&intf->xmit_msgs, &tmplist);
3735 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3750 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3754 ent = &intf->seq_table[i];
3757 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3761 void ipmi_unregister_smi(struct ipmi_smi *intf)
3766 if (!intf)
3768 intf_num = intf->intf_num;
3770 intf->intf_num = -1;
3771 intf->in_shutdown = true;
3772 list_del_rcu(&intf->link);
3778 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
3779 device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3790 index = srcu_read_lock(&intf->users_srcu);
3791 while (!list_empty(&intf->users)) {
3793 container_of(list_next_rcu(&intf->users),
3798 srcu_read_unlock(&intf->users_srcu, index);
3800 if (intf->handlers->shutdown)
3801 intf->handlers->shutdown(intf->send_info);
3803 cleanup_smi_msgs(intf);
3805 ipmi_bmc_unregister(intf);
3807 cleanup_srcu_struct(&intf->users_srcu);
3808 kref_put(&intf->refcount, intf_free);
3812 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3824 ipmi_inc_stat(intf, invalid_ipmb_responses);
3842 if (intf_find_seq(intf,
3853 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3867 if (deliver_response(intf, recv_msg))
3868 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3870 ipmi_inc_stat(intf, handled_ipmb_responses);
3875 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3889 ipmi_inc_stat(intf, invalid_commands);
3903 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3913 ipmi_inc_stat(intf, unhandled_commands);
3921 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3929 dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
3933 if (!intf->in_shutdown) {
3934 smi_send(intf, intf->handlers, msg, 0);
3979 if (deliver_response(intf, recv_msg))
3980 ipmi_inc_stat(intf, unhandled_commands);
3982 ipmi_inc_stat(intf, handled_commands);
3989 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
4002 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
4012 ipmi_inc_stat(intf, unhandled_commands);
4024 if (!intf->in_shutdown) {
4025 smi_send(intf, intf->handlers, msg, 0);
4067 if (deliver_response(intf, recv_msg))
4068 ipmi_inc_stat(intf, unhandled_commands);
4070 ipmi_inc_stat(intf, handled_commands);
4077 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
4085 dev_warn(intf->si_dev,
4103 deliver_local_response(intf, recv_msg);
4108 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
4121 ipmi_inc_stat(intf, invalid_lan_responses);
4142 if (intf_find_seq(intf,
4153 ipmi_inc_stat(intf, unhandled_lan_responses);
4167 if (deliver_response(intf, recv_msg))
4168 ipmi_inc_stat(intf, unhandled_lan_responses);
4170 ipmi_inc_stat(intf, handled_lan_responses);
4175 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
4189 ipmi_inc_stat(intf, invalid_commands);
4203 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4213 ipmi_inc_stat(intf, unhandled_commands);
4258 if (deliver_response(intf, recv_msg))
4259 ipmi_inc_stat(intf, unhandled_commands);
4261 ipmi_inc_stat(intf, handled_commands);
4274 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4292 ipmi_inc_stat(intf, invalid_commands);
4310 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4320 ipmi_inc_stat(intf, unhandled_commands);
4365 if (deliver_response(intf, recv_msg))
4366 ipmi_inc_stat(intf, unhandled_commands);
4368 ipmi_inc_stat(intf, handled_commands);
4393 static int handle_read_event_rsp(struct ipmi_smi *intf,
4404 ipmi_inc_stat(intf, invalid_events);
4415 spin_lock_irqsave(&intf->events_lock, flags);
4417 ipmi_inc_stat(intf, events);
4423 index = srcu_read_lock(&intf->users_srcu);
4424 list_for_each_entry_rcu(user, &intf->users, link) {
4452 srcu_read_unlock(&intf->users_srcu, index);
4458 deliver_local_response(intf, recv_msg);
4460 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4477 list_add_tail(&recv_msg->link, &intf->waiting_events);
4478 intf->waiting_events_count++;
4479 } else if (!intf->event_msg_printed) {
4484 dev_warn(intf->si_dev,
4486 intf->event_msg_printed = 1;
4490 spin_unlock_irqrestore(&intf->events_lock, flags);
4495 static int handle_bmc_rsp(struct ipmi_smi *intf,
4503 dev_warn(intf->si_dev,
4520 deliver_local_response(intf, recv_msg);
4530 static int handle_one_recv_msg(struct ipmi_smi *intf,
4538 dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
4542 dev_warn(intf->si_dev,
4555 ipmi_inc_stat(intf, invalid_commands);
4559 ipmi_inc_stat(intf, invalid_ipmb_responses);
4576 if (intf->in_shutdown)
4602 chans = READ_ONCE(intf->channel_list)->c;
4605 ipmi_inc_stat(intf, sent_lan_command_errs);
4607 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4608 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4611 intf_start_seq_timer(intf, msg->msgid);
4620 dev_warn(intf->si_dev,
4636 requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
4638 requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
4665 deliver_local_response(intf, recv_msg);
4684 if (!intf->channels_ready) {
4689 chans = READ_ONCE(intf->channel_list)->c;
4698 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4704 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4715 requeue = handle_lan_get_msg_rsp(intf, msg);
4721 requeue = handle_lan_get_msg_cmd(intf, msg);
4731 requeue = handle_oem_get_msg_cmd(intf, msg);
4744 requeue = handle_read_event_rsp(intf, msg);
4747 requeue = handle_bmc_rsp(intf, msg);
4757 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4762 int run_to_completion = intf->run_to_completion;
4766 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4767 while (!list_empty(&intf->waiting_rcv_msgs)) {
4768 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4772 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4774 rv = handle_one_recv_msg(intf, smi_msg);
4776 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4785 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4795 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4801 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4805 index = srcu_read_lock(&intf->users_srcu);
4806 list_for_each_entry_rcu(user, &intf->users, link) {
4811 srcu_read_unlock(&intf->users_srcu, index);
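
handle_new_recv_msgs() pops one message at a time from waiting_rcv_msgs under its lock, processes it with the lock dropped, and if handle_one_recv_msg() asks for deferral (returns > 0, e.g. the destination user is not ready yet) the message is pushed back at the head and the loop stops. A single-threaded sketch of the pop/requeue logic, locking elided:

    #include <stdio.h>

    struct smi_msg { struct smi_msg *next; int id; };

    static struct smi_msg *waiting;   /* head of the receive queue */

    /* 0 = consumed, >0 = retry this message later. */
    static int handle_one(struct smi_msg *m)
    {
        if (m->id == 2)
            return 1;                 /* upper layer can't take it yet */
        printf("handled %d\n", m->id);
        return 0;
    }

    static void handle_new_msgs(void)
    {
        while (waiting) {
            struct smi_msg *m = waiting;
            waiting = m->next;        /* popped under the lock in the kernel */
            if (handle_one(m) > 0) {  /* processed with the lock dropped */
                m->next = waiting;    /* couldn't deliver: push back, stop */
                waiting = m;
                break;
            }
        }
    }

    int main(void)
    {
        struct smi_msg b = { 0, 2 }, a = { &b, 1 };
        waiting = &a;
        handle_new_msgs();            /* handles 1, requeues 2 */
        printf("head now %d\n", waiting ? waiting->id : -1);
        return 0;
    }
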
4818 struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4819 int run_to_completion = intf->run_to_completion;
4833 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4834 if (intf->curr_msg == NULL && !intf->in_shutdown) {
4838 if (!list_empty(&intf->hp_xmit_msgs))
4839 entry = intf->hp_xmit_msgs.next;
4840 else if (!list_empty(&intf->xmit_msgs))
4841 entry = intf->xmit_msgs.next;
4846 intf->curr_msg = newmsg;
4851 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4853 intf->handlers->sender(intf->send_info, newmsg);
4857 handle_new_recv_msgs(intf);
4861 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4865 int run_to_completion = intf->run_to_completion;
4872 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4873 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4875 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4879 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4884 if (msg == intf->curr_msg)
4885 intf->curr_msg = NULL;
4887 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4890 smi_recv_tasklet(&intf->recv_tasklet);
4892 tasklet_schedule(&intf->recv_tasklet);
4896 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4898 if (intf->in_shutdown)
4901 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4902 tasklet_schedule(&intf->recv_tasklet);
4907 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4922 dev_dbg(intf->si_dev, "Resend: %*ph\n",
4928 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4936 if (intf->in_shutdown)
4951 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4955 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4957 ipmi_inc_stat(intf, timed_out_lan_commands);
4959 ipmi_inc_stat(intf, timed_out_ipmb_commands);
4972 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4976 ipmi_inc_stat(intf,
4979 ipmi_inc_stat(intf,
4984 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4993 if (intf->handlers) {
4995 ipmi_inc_stat(intf,
4998 ipmi_inc_stat(intf,
5001 smi_send(intf, intf->handlers, smi_msg, 0);
5005 spin_lock_irqsave(&intf->seq_lock, *flags);
5009 static bool ipmi_timeout_handler(struct ipmi_smi *intf,
5018 if (!intf->bmc_registered) {
5019 kref_get(&intf->refcount);
5020 if (!schedule_work(&intf->bmc_reg_work)) {
5021 kref_put(&intf->refcount, intf_free);
5032 spin_lock_irqsave(&intf->seq_lock, flags);
5033 if (intf->ipmb_maintenance_mode_timeout) {
5034 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
5035 intf->ipmb_maintenance_mode_timeout = 0;
5037 intf->ipmb_maintenance_mode_timeout -= timeout_period;
5040 check_msg_timeout(intf, &intf->seq_table[i],
5043 spin_unlock_irqrestore(&intf->seq_lock, flags);
5046 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
5056 if (intf->auto_maintenance_timeout > 0) {
5057 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
5058 if (intf->auto_maintenance_timeout > 0) {
5059 intf->auto_maintenance_timeout
5061 if (!intf->maintenance_mode
5062 && (intf->auto_maintenance_timeout <= 0)) {
5063 intf->maintenance_mode_enable = false;
5064 maintenance_mode_update(intf);
5067 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
5071 tasklet_schedule(&intf->recv_tasklet);
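
ipmi_timeout_handler() subtracts the elapsed period from every in-use sequence-table entry; check_msg_timeout() then either retransmits (consuming one of retries_left and rearming the timeout from orig_timeout) or fails the request with a timeout completion code. A standalone sketch of that decrement-and-retry pass; periods and counts are illustrative:

    #include <stdio.h>

    struct seq_ent {
        int  inuse;
        long timeout_ms;      /* time remaining for the outstanding message */
        int  retries_left;
    };

    static void resend(int slot) { printf("resend slot %d\n", slot); }
    static void fail(int slot)   { printf("slot %d timed out\n", slot); }

    static void tick(struct seq_ent *tab, int n, long period_ms, long orig_ms)
    {
        for (int i = 0; i < n; i++) {
            struct seq_ent *e = &tab[i];
            if (!e->inuse)
                continue;
            e->timeout_ms -= period_ms;
            if (e->timeout_ms > 0)
                continue;
            if (e->retries_left-- > 0) {
                e->timeout_ms = orig_ms;   /* rearm and retransmit */
                resend(i);
            } else {
                e->inuse = 0;              /* give up: deliver an error */
                fail(i);
            }
        }
    }

    int main(void)
    {
        struct seq_ent tab[2] = { { 1, 1000, 1 }, { 1, 5000, 0 } };
        for (int t = 0; t < 3; t++)
            tick(tab, 2, 1000, 1000);      /* slot 0: resend, then fail */
        return 0;
    }
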
5076 static void ipmi_request_event(struct ipmi_smi *intf)
5079 if (intf->maintenance_mode_enable)
5082 if (!intf->in_shutdown)
5083 intf->handlers->request_events(intf->send_info);
5092 struct ipmi_smi *intf;
5100 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5101 if (atomic_read(&intf->event_waiters)) {
5102 intf->ticks_to_req_ev--;
5103 if (intf->ticks_to_req_ev == 0) {
5104 ipmi_request_event(intf);
5105 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
5110 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
5118 static void need_waiter(struct ipmi_smi *intf)
5194 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
5206 intf,
5210 intf,
5214 intf->addrinfo[0].address,
5215 intf->addrinfo[0].lun,
5219 else if (intf->handlers->flush_messages)
5220 intf->handlers->flush_messages(intf->send_info);
5223 ipmi_poll(intf);
5226 static void event_receiver_fetcher(struct ipmi_smi *intf,
5234 intf->event_receiver = msg->msg.data[1];
5235 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
5239 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
5249 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
5250 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
5254 static void send_panic_events(struct ipmi_smi *intf, char *str)
5294 ipmi_panic_request_and_wait(intf, &addr, &msg);
5320 intf->local_sel_device = 0;
5321 intf->local_event_generator = 0;
5322 intf->event_receiver = 0;
5329 intf->null_user_handler = device_id_fetcher;
5330 ipmi_panic_request_and_wait(intf, &addr, &msg);
5332 if (intf->local_event_generator) {
5338 intf->null_user_handler = event_receiver_fetcher;
5339 ipmi_panic_request_and_wait(intf, &addr, &msg);
5341 intf->null_user_handler = NULL;
5348 if (((intf->event_receiver & 1) == 0)
5349 && (intf->event_receiver != 0)
5350 && (intf->event_receiver != intf->addrinfo[0].address)) {
5358 ipmb->lun = intf->event_receiver_lun;
5359 ipmb->slave_addr = intf->event_receiver;
5360 } else if (intf->local_sel_device) {
5387 data[3] = intf->addrinfo[0].address;
5396 ipmi_panic_request_and_wait(intf, &addr, &msg);
5406 struct ipmi_smi *intf;
5414 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5415 if (!intf->handlers || intf->intf_num == -1)
5419 if (!intf->handlers->poll)
5428 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5429 INIT_LIST_HEAD(&intf->xmit_msgs);
5430 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5432 spin_unlock(&intf->xmit_msgs_lock);
5434 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5435 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5437 spin_unlock(&intf->waiting_rcv_msgs_lock);
5439 intf->run_to_completion = 1;
5440 if (intf->handlers->set_run_to_completion)
5441 intf->handlers->set_run_to_completion(intf->send_info,
5444 list_for_each_entry_rcu(user, &intf->users, link) {
5450 send_panic_events(intf, ptr);