1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * ipmi_msghandler.c
4 *
5 * Incoming and outgoing message routing for an IPMI interface.
6 *
7 * Author: MontaVista Software, Inc.
8 *         Corey Minyard <minyard@mvista.com>
9 *         source@mvista.com
10 *
11 * Copyright 2002 MontaVista Software Inc.
12 */
13
14#define pr_fmt(fmt) "IPMI message handler: " fmt
15#define dev_fmt(fmt) pr_fmt(fmt)
16
17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/poll.h>
20#include <linux/sched.h>
21#include <linux/seq_file.h>
22#include <linux/spinlock.h>
23#include <linux/mutex.h>
24#include <linux/slab.h>
25#include <linux/ipmi.h>
26#include <linux/ipmi_smi.h>
27#include <linux/notifier.h>
28#include <linux/init.h>
29#include <linux/proc_fs.h>
30#include <linux/rcupdate.h>
31#include <linux/interrupt.h>
32#include <linux/moduleparam.h>
33#include <linux/workqueue.h>
34#include <linux/uuid.h>
35#include <linux/nospec.h>
36#include <linux/vmalloc.h>
37#include <linux/delay.h>
38
39#define IPMI_DRIVER_VERSION "39.2"
40
41static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
42static int ipmi_init_msghandler(void);
43static void smi_recv_tasklet(struct tasklet_struct *t);
44static void handle_new_recv_msgs(struct ipmi_smi *intf);
45static void need_waiter(struct ipmi_smi *intf);
46static int handle_one_recv_msg(struct ipmi_smi *intf,
47			       struct ipmi_smi_msg *msg);
48
49static bool initialized;
50static bool drvregistered;
51
52enum ipmi_panic_event_op {
53	IPMI_SEND_PANIC_EVENT_NONE,
54	IPMI_SEND_PANIC_EVENT,
55	IPMI_SEND_PANIC_EVENT_STRING
56};
57#ifdef CONFIG_IPMI_PANIC_STRING
58#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
59#elif defined(CONFIG_IPMI_PANIC_EVENT)
60#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
61#else
62#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
63#endif
64
65static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
66
67static int panic_op_write_handler(const char *val,
68				  const struct kernel_param *kp)
69{
70	char valcp[16];
71	char *s;
72
73	strncpy(valcp, val, 15);
74	valcp[15] = '\0';
75
76	s = strstrip(valcp);
77
78	if (strcmp(s, "none") == 0)
79		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
80	else if (strcmp(s, "event") == 0)
81		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
82	else if (strcmp(s, "string") == 0)
83		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
84	else
85		return -EINVAL;
86
87	return 0;
88}
89
90static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
91{
92	switch (ipmi_send_panic_event) {
93	case IPMI_SEND_PANIC_EVENT_NONE:
94		strcpy(buffer, "none\n");
95		break;
96
97	case IPMI_SEND_PANIC_EVENT:
98		strcpy(buffer, "event\n");
99		break;
100
101	case IPMI_SEND_PANIC_EVENT_STRING:
102		strcpy(buffer, "string\n");
103		break;
104
105	default:
106		strcpy(buffer, "???\n");
107		break;
108	}
109
110	return strlen(buffer);
111}
112
113static const struct kernel_param_ops panic_op_ops = {
114	.set = panic_op_write_handler,
115	.get = panic_op_read_handler
116};
117module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
118MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
119
120
121#define MAX_EVENTS_IN_QUEUE	25
122
123/* Remain in auto-maintenance mode for this amount of time (in ms). */
124static unsigned long maintenance_mode_timeout_ms = 30000;
125module_param(maintenance_mode_timeout_ms, ulong, 0644);
126MODULE_PARM_DESC(maintenance_mode_timeout_ms,
127		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
128
129/*
130 * Don't let a message sit in a queue forever, always time it with at lest
131 * the max message timer.  This is in milliseconds.
132 */
133#define MAX_MSG_TIMEOUT		60000
134
135/*
136 * Timeout times below are in milliseconds, and are done off a 1
137 * second timer.  So setting the value to 1000 would mean anything
138 * between 0 and 1000ms.  So really the only reasonable minimum
139 * setting it 2000ms, which is between 1 and 2 seconds.
140 */
141
142/* The default timeout for message retries. */
143static unsigned long default_retry_ms = 2000;
144module_param(default_retry_ms, ulong, 0644);
145MODULE_PARM_DESC(default_retry_ms,
146		 "The time (milliseconds) between retry sends");
147
148/* The default timeout for maintenance mode message retries. */
149static unsigned long default_maintenance_retry_ms = 3000;
150module_param(default_maintenance_retry_ms, ulong, 0644);
151MODULE_PARM_DESC(default_maintenance_retry_ms,
152		 "The time (milliseconds) between retry sends in maintenance mode");
153
154/* The default maximum number of retries */
155static unsigned int default_max_retries = 4;
156module_param(default_max_retries, uint, 0644);
157MODULE_PARM_DESC(default_max_retries,
158		 "The time (milliseconds) between retry sends in maintenance mode");
159
160/* Call every ~1000 ms. */
161#define IPMI_TIMEOUT_TIME	1000
162
163/* How many jiffies does it take to get to the timeout time. */
164#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
165
166/*
167 * Request events from the queue every second (this is the number of
168 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
169 * future, IPMI will add a way to know immediately if an event is in
170 * the queue and this silliness can go away.
171 */
172#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
173
174/* How long should we cache dynamic device IDs? */
175#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
176
177/*
178 * The main "user" data structure.
179 */
180struct ipmi_user {
181	struct list_head link;
182
183	/*
184	 * Set to NULL when the user is destroyed, a pointer to myself
185	 * so srcu_dereference can be used on it.
186	 */
187	struct ipmi_user *self;
188	struct srcu_struct release_barrier;
189
190	struct kref refcount;
191
192	/* The upper layer that handles receive messages. */
193	const struct ipmi_user_hndl *handler;
194	void             *handler_data;
195
196	/* The interface this user is bound to. */
197	struct ipmi_smi *intf;
198
199	/* Does this interface receive IPMI events? */
200	bool gets_events;
201
202	/* Free must run in process context for RCU cleanup. */
203	struct work_struct remove_work;
204};
205
206static struct workqueue_struct *remove_work_wq;
207
208static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
209	__acquires(user->release_barrier)
210{
211	struct ipmi_user *ruser;
212
213	*index = srcu_read_lock(&user->release_barrier);
214	ruser = srcu_dereference(user->self, &user->release_barrier);
215	if (!ruser)
216		srcu_read_unlock(&user->release_barrier, *index);
217	return ruser;
218}
219
220static void release_ipmi_user(struct ipmi_user *user, int index)
221{
222	srcu_read_unlock(&user->release_barrier, index);
223}
224
225struct cmd_rcvr {
226	struct list_head link;
227
228	struct ipmi_user *user;
229	unsigned char netfn;
230	unsigned char cmd;
231	unsigned int  chans;
232
233	/*
234	 * This is used to form a linked lised during mass deletion.
235	 * Since this is in an RCU list, we cannot use the link above
236	 * or change any data until the RCU period completes.  So we
237	 * use this next variable during mass deletion so we can have
238	 * a list and don't have to wait and restart the search on
239	 * every individual deletion of a command.
240	 */
241	struct cmd_rcvr *next;
242};
243
244struct seq_table {
245	unsigned int         inuse : 1;
246	unsigned int         broadcast : 1;
247
248	unsigned long        timeout;
249	unsigned long        orig_timeout;
250	unsigned int         retries_left;
251
252	/*
253	 * To verify on an incoming send message response that this is
254	 * the message that the response is for, we keep a sequence id
255	 * and increment it every time we send a message.
256	 */
257	long                 seqid;
258
259	/*
260	 * This is held so we can properly respond to the message on a
261	 * timeout, and it is used to hold the temporary data for
262	 * retransmission, too.
263	 */
264	struct ipmi_recv_msg *recv_msg;
265};
266
267/*
268 * Store the information in a msgid (long) to allow us to find a
269 * sequence table entry from the msgid.
270 */
271#define STORE_SEQ_IN_MSGID(seq, seqid) \
272	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
273
274#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
275	do {								\
276		seq = (((msgid) >> 26) & 0x3f);				\
277		seqid = ((msgid) & 0x3ffffff);				\
278	} while (0)
279
280#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
281
282#define IPMI_MAX_CHANNELS       16
283struct ipmi_channel {
284	unsigned char medium;
285	unsigned char protocol;
286};
287
288struct ipmi_channel_set {
289	struct ipmi_channel c[IPMI_MAX_CHANNELS];
290};
291
292struct ipmi_my_addrinfo {
293	/*
294	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
295	 * but may be changed by the user.
296	 */
297	unsigned char address;
298
299	/*
300	 * My LUN.  This should generally stay the SMS LUN, but just in
301	 * case...
302	 */
303	unsigned char lun;
304};
305
306/*
307 * Note that the product id, manufacturer id, guid, and device id are
308 * immutable in this structure, so dyn_mutex is not required for
309 * accessing those.  If those change on a BMC, a new BMC is allocated.
310 */
311struct bmc_device {
312	struct platform_device pdev;
313	struct list_head       intfs; /* Interfaces on this BMC. */
314	struct ipmi_device_id  id;
315	struct ipmi_device_id  fetch_id;
316	int                    dyn_id_set;
317	unsigned long          dyn_id_expiry;
318	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
319	guid_t                 guid;
320	guid_t                 fetch_guid;
321	int                    dyn_guid_set;
322	struct kref	       usecount;
323	struct work_struct     remove_work;
324	unsigned char	       cc; /* completion code */
325};
326#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
327
328static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
329			     struct ipmi_device_id *id,
330			     bool *guid_set, guid_t *guid);
331
332/*
333 * Various statistics for IPMI, these index stats[] in the ipmi_smi
334 * structure.
335 */
336enum ipmi_stat_indexes {
337	/* Commands we got from the user that were invalid. */
338	IPMI_STAT_sent_invalid_commands = 0,
339
340	/* Commands we sent to the MC. */
341	IPMI_STAT_sent_local_commands,
342
343	/* Responses from the MC that were delivered to a user. */
344	IPMI_STAT_handled_local_responses,
345
346	/* Responses from the MC that were not delivered to a user. */
347	IPMI_STAT_unhandled_local_responses,
348
349	/* Commands we sent out to the IPMB bus. */
350	IPMI_STAT_sent_ipmb_commands,
351
352	/* Commands sent on the IPMB that had errors on the SEND CMD */
353	IPMI_STAT_sent_ipmb_command_errs,
354
355	/* Each retransmit increments this count. */
356	IPMI_STAT_retransmitted_ipmb_commands,
357
358	/*
359	 * When a message times out (runs out of retransmits) this is
360	 * incremented.
361	 */
362	IPMI_STAT_timed_out_ipmb_commands,
363
364	/*
365	 * This is like above, but for broadcasts.  Broadcasts are
366	 * *not* included in the above count (they are expected to
367	 * time out).
368	 */
369	IPMI_STAT_timed_out_ipmb_broadcasts,
370
371	/* Responses I have sent to the IPMB bus. */
372	IPMI_STAT_sent_ipmb_responses,
373
374	/* The response was delivered to the user. */
375	IPMI_STAT_handled_ipmb_responses,
376
377	/* The response had invalid data in it. */
378	IPMI_STAT_invalid_ipmb_responses,
379
380	/* The response didn't have anyone waiting for it. */
381	IPMI_STAT_unhandled_ipmb_responses,
382
383	/* Commands we sent out to the IPMB bus. */
384	IPMI_STAT_sent_lan_commands,
385
386	/* Commands sent on the IPMB that had errors on the SEND CMD */
387	IPMI_STAT_sent_lan_command_errs,
388
389	/* Each retransmit increments this count. */
390	IPMI_STAT_retransmitted_lan_commands,
391
392	/*
393	 * When a message times out (runs out of retransmits) this is
394	 * incremented.
395	 */
396	IPMI_STAT_timed_out_lan_commands,
397
398	/* Responses I have sent to the IPMB bus. */
399	IPMI_STAT_sent_lan_responses,
400
401	/* The response was delivered to the user. */
402	IPMI_STAT_handled_lan_responses,
403
404	/* The response had invalid data in it. */
405	IPMI_STAT_invalid_lan_responses,
406
407	/* The response didn't have anyone waiting for it. */
408	IPMI_STAT_unhandled_lan_responses,
409
410	/* The command was delivered to the user. */
411	IPMI_STAT_handled_commands,
412
413	/* The command had invalid data in it. */
414	IPMI_STAT_invalid_commands,
415
416	/* The command didn't have anyone waiting for it. */
417	IPMI_STAT_unhandled_commands,
418
419	/* Invalid data in an event. */
420	IPMI_STAT_invalid_events,
421
422	/* Events that were received with the proper format. */
423	IPMI_STAT_events,
424
425	/* Retransmissions on IPMB that failed. */
426	IPMI_STAT_dropped_rexmit_ipmb_commands,
427
428	/* Retransmissions on LAN that failed. */
429	IPMI_STAT_dropped_rexmit_lan_commands,
430
431	/* This *must* remain last, add new values above this. */
432	IPMI_NUM_STATS
433};
434
435
436#define IPMI_IPMB_NUM_SEQ	64
437struct ipmi_smi {
438	struct module *owner;
439
440	/* What interface number are we? */
441	int intf_num;
442
443	struct kref refcount;
444
445	/* Set when the interface is being unregistered. */
446	bool in_shutdown;
447
448	/* Used for a list of interfaces. */
449	struct list_head link;
450
451	/*
452	 * The list of upper layers that are using me.  seq_lock write
453	 * protects this.  Read protection is with srcu.
454	 */
455	struct list_head users;
456	struct srcu_struct users_srcu;
457
458	/* Used for wake ups at startup. */
459	wait_queue_head_t waitq;
460
461	/*
462	 * Prevents the interface from being unregistered when the
463	 * interface is used by being looked up through the BMC
464	 * structure.
465	 */
466	struct mutex bmc_reg_mutex;
467
468	struct bmc_device tmp_bmc;
469	struct bmc_device *bmc;
470	bool bmc_registered;
471	struct list_head bmc_link;
472	char *my_dev_name;
473	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
474	struct work_struct bmc_reg_work;
475
476	const struct ipmi_smi_handlers *handlers;
477	void                     *send_info;
478
479	/* Driver-model device for the system interface. */
480	struct device          *si_dev;
481
482	/*
483	 * A table of sequence numbers for this interface.  We use the
484	 * sequence numbers for IPMB messages that go out of the
485	 * interface to match them up with their responses.  A routine
486	 * is called periodically to time the items in this list.
487	 */
488	spinlock_t       seq_lock;
489	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
490	int curr_seq;
491
492	/*
493	 * Messages queued for delivery.  If delivery fails (out of memory
494	 * for instance), They will stay in here to be processed later in a
495	 * periodic timer interrupt.  The tasklet is for handling received
496	 * messages directly from the handler.
497	 */
498	spinlock_t       waiting_rcv_msgs_lock;
499	struct list_head waiting_rcv_msgs;
500	atomic_t	 watchdog_pretimeouts_to_deliver;
501	struct tasklet_struct recv_tasklet;
502
503	spinlock_t             xmit_msgs_lock;
504	struct list_head       xmit_msgs;
505	struct ipmi_smi_msg    *curr_msg;
506	struct list_head       hp_xmit_msgs;
507
508	/*
509	 * The list of command receivers that are registered for commands
510	 * on this interface.
511	 */
512	struct mutex     cmd_rcvrs_mutex;
513	struct list_head cmd_rcvrs;
514
515	/*
516	 * Events that were queues because no one was there to receive
517	 * them.
518	 */
519	spinlock_t       events_lock; /* For dealing with event stuff. */
520	struct list_head waiting_events;
521	unsigned int     waiting_events_count; /* How many events in queue? */
522	char             delivering_events;
523	char             event_msg_printed;
524
525	/* How many users are waiting for events? */
526	atomic_t         event_waiters;
527	unsigned int     ticks_to_req_ev;
528
529	spinlock_t       watch_lock; /* For dealing with watch stuff below. */
530
531	/* How many users are waiting for commands? */
532	unsigned int     command_waiters;
533
534	/* How many users are waiting for watchdogs? */
535	unsigned int     watchdog_waiters;
536
537	/* How many users are waiting for message responses? */
538	unsigned int     response_waiters;
539
540	/*
541	 * Tells what the lower layer has last been asked to watch for,
542	 * messages and/or watchdogs.  Protected by watch_lock.
543	 */
544	unsigned int     last_watch_mask;
545
546	/*
547	 * The event receiver for my BMC, only really used at panic
548	 * shutdown as a place to store this.
549	 */
550	unsigned char event_receiver;
551	unsigned char event_receiver_lun;
552	unsigned char local_sel_device;
553	unsigned char local_event_generator;
554
555	/* For handling of maintenance mode. */
556	int maintenance_mode;
557	bool maintenance_mode_enable;
558	int auto_maintenance_timeout;
559	spinlock_t maintenance_mode_lock; /* Used in a timer... */
560
561	/*
562	 * If we are doing maintenance on something on IPMB, extend
563	 * the timeout time to avoid timeouts writing firmware and
564	 * such.
565	 */
566	int ipmb_maintenance_mode_timeout;
567
568	/*
569	 * A cheap hack, if this is non-null and a message to an
570	 * interface comes in with a NULL user, call this routine with
571	 * it.  Note that the message will still be freed by the
572	 * caller.  This only works on the system interface.
573	 *
574	 * Protected by bmc_reg_mutex.
575	 */
576	void (*null_user_handler)(struct ipmi_smi *intf,
577				  struct ipmi_recv_msg *msg);
578
579	/*
580	 * When we are scanning the channels for an SMI, this will
581	 * tell which channel we are scanning.
582	 */
583	int curr_channel;
584
585	/* Channel information */
586	struct ipmi_channel_set *channel_list;
587	unsigned int curr_working_cset; /* First index into the following. */
588	struct ipmi_channel_set wchannels[2];
589	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
590	bool channels_ready;
591
592	atomic_t stats[IPMI_NUM_STATS];
593
594	/*
595	 * run_to_completion duplicate of smb_info, smi_info
596	 * and ipmi_serial_info structures. Used to decrease numbers of
597	 * parameters passed by "low" level IPMI code.
598	 */
599	int run_to_completion;
600};
601#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
602
603static void __get_guid(struct ipmi_smi *intf);
604static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
605static int __ipmi_bmc_register(struct ipmi_smi *intf,
606			       struct ipmi_device_id *id,
607			       bool guid_set, guid_t *guid, int intf_num);
608static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
609
610
611/**
612 * The driver model view of the IPMI messaging driver.
613 */
614static struct platform_driver ipmidriver = {
615	.driver = {
616		.name = "ipmi",
617		.bus = &platform_bus_type
618	}
619};
620/*
621 * This mutex keeps us from adding the same BMC twice.
622 */
623static DEFINE_MUTEX(ipmidriver_mutex);
624
625static LIST_HEAD(ipmi_interfaces);
626static DEFINE_MUTEX(ipmi_interfaces_mutex);
627#define ipmi_interfaces_mutex_held() \
628	lockdep_is_held(&ipmi_interfaces_mutex)
629static struct srcu_struct ipmi_interfaces_srcu;
630
631/*
632 * List of watchers that want to know when smi's are added and deleted.
633 */
634static LIST_HEAD(smi_watchers);
635static DEFINE_MUTEX(smi_watchers_mutex);
636
637#define ipmi_inc_stat(intf, stat) \
638	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
639#define ipmi_get_stat(intf, stat) \
640	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
641
642static const char * const addr_src_to_str[] = {
643	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
644	"device-tree", "platform"
645};
646
647const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
648{
649	if (src >= SI_LAST)
650		src = 0; /* Invalid */
651	return addr_src_to_str[src];
652}
653EXPORT_SYMBOL(ipmi_addr_src_to_str);
654
655static int is_lan_addr(struct ipmi_addr *addr)
656{
657	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
658}
659
660static int is_ipmb_addr(struct ipmi_addr *addr)
661{
662	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
663}
664
665static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
666{
667	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
668}
669
670static void free_recv_msg_list(struct list_head *q)
671{
672	struct ipmi_recv_msg *msg, *msg2;
673
674	list_for_each_entry_safe(msg, msg2, q, link) {
675		list_del(&msg->link);
676		ipmi_free_recv_msg(msg);
677	}
678}
679
680static void free_smi_msg_list(struct list_head *q)
681{
682	struct ipmi_smi_msg *msg, *msg2;
683
684	list_for_each_entry_safe(msg, msg2, q, link) {
685		list_del(&msg->link);
686		ipmi_free_smi_msg(msg);
687	}
688}
689
690static void clean_up_interface_data(struct ipmi_smi *intf)
691{
692	int              i;
693	struct cmd_rcvr  *rcvr, *rcvr2;
694	struct list_head list;
695
696	tasklet_kill(&intf->recv_tasklet);
697
698	free_smi_msg_list(&intf->waiting_rcv_msgs);
699	free_recv_msg_list(&intf->waiting_events);
700
701	/*
702	 * Wholesale remove all the entries from the list in the
703	 * interface and wait for RCU to know that none are in use.
704	 */
705	mutex_lock(&intf->cmd_rcvrs_mutex);
706	INIT_LIST_HEAD(&list);
707	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
708	mutex_unlock(&intf->cmd_rcvrs_mutex);
709
710	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
711		kfree(rcvr);
712
713	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
714		if ((intf->seq_table[i].inuse)
715					&& (intf->seq_table[i].recv_msg))
716			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
717	}
718}
719
720static void intf_free(struct kref *ref)
721{
722	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
723
724	clean_up_interface_data(intf);
725	kfree(intf);
726}
727
728struct watcher_entry {
729	int              intf_num;
730	struct ipmi_smi  *intf;
731	struct list_head link;
732};
733
734int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
735{
736	struct ipmi_smi *intf;
737	int index, rv;
738
739	/*
740	 * Make sure the driver is actually initialized, this handles
741	 * problems with initialization order.
742	 */
743	rv = ipmi_init_msghandler();
744	if (rv)
745		return rv;
746
747	mutex_lock(&smi_watchers_mutex);
748
749	list_add(&watcher->link, &smi_watchers);
750
751	index = srcu_read_lock(&ipmi_interfaces_srcu);
752	list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
753			lockdep_is_held(&smi_watchers_mutex)) {
754		int intf_num = READ_ONCE(intf->intf_num);
755
756		if (intf_num == -1)
757			continue;
758		watcher->new_smi(intf_num, intf->si_dev);
759	}
760	srcu_read_unlock(&ipmi_interfaces_srcu, index);
761
762	mutex_unlock(&smi_watchers_mutex);
763
764	return 0;
765}
766EXPORT_SYMBOL(ipmi_smi_watcher_register);
767
768int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
769{
770	mutex_lock(&smi_watchers_mutex);
771	list_del(&watcher->link);
772	mutex_unlock(&smi_watchers_mutex);
773	return 0;
774}
775EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
776
777/*
778 * Must be called with smi_watchers_mutex held.
779 */
780static void
781call_smi_watchers(int i, struct device *dev)
782{
783	struct ipmi_smi_watcher *w;
784
785	mutex_lock(&smi_watchers_mutex);
786	list_for_each_entry(w, &smi_watchers, link) {
787		if (try_module_get(w->owner)) {
788			w->new_smi(i, dev);
789			module_put(w->owner);
790		}
791	}
792	mutex_unlock(&smi_watchers_mutex);
793}
794
795static int
796ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
797{
798	if (addr1->addr_type != addr2->addr_type)
799		return 0;
800
801	if (addr1->channel != addr2->channel)
802		return 0;
803
804	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
805		struct ipmi_system_interface_addr *smi_addr1
806		    = (struct ipmi_system_interface_addr *) addr1;
807		struct ipmi_system_interface_addr *smi_addr2
808		    = (struct ipmi_system_interface_addr *) addr2;
809		return (smi_addr1->lun == smi_addr2->lun);
810	}
811
812	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
813		struct ipmi_ipmb_addr *ipmb_addr1
814		    = (struct ipmi_ipmb_addr *) addr1;
815		struct ipmi_ipmb_addr *ipmb_addr2
816		    = (struct ipmi_ipmb_addr *) addr2;
817
818		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
819			&& (ipmb_addr1->lun == ipmb_addr2->lun));
820	}
821
822	if (is_lan_addr(addr1)) {
823		struct ipmi_lan_addr *lan_addr1
824			= (struct ipmi_lan_addr *) addr1;
825		struct ipmi_lan_addr *lan_addr2
826		    = (struct ipmi_lan_addr *) addr2;
827
828		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
829			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
830			&& (lan_addr1->session_handle
831			    == lan_addr2->session_handle)
832			&& (lan_addr1->lun == lan_addr2->lun));
833	}
834
835	return 1;
836}
837
838int ipmi_validate_addr(struct ipmi_addr *addr, int len)
839{
840	if (len < sizeof(struct ipmi_system_interface_addr))
841		return -EINVAL;
842
843	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
844		if (addr->channel != IPMI_BMC_CHANNEL)
845			return -EINVAL;
846		return 0;
847	}
848
849	if ((addr->channel == IPMI_BMC_CHANNEL)
850	    || (addr->channel >= IPMI_MAX_CHANNELS)
851	    || (addr->channel < 0))
852		return -EINVAL;
853
854	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
855		if (len < sizeof(struct ipmi_ipmb_addr))
856			return -EINVAL;
857		return 0;
858	}
859
860	if (is_lan_addr(addr)) {
861		if (len < sizeof(struct ipmi_lan_addr))
862			return -EINVAL;
863		return 0;
864	}
865
866	return -EINVAL;
867}
868EXPORT_SYMBOL(ipmi_validate_addr);
869
870unsigned int ipmi_addr_length(int addr_type)
871{
872	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
873		return sizeof(struct ipmi_system_interface_addr);
874
875	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
876			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
877		return sizeof(struct ipmi_ipmb_addr);
878
879	if (addr_type == IPMI_LAN_ADDR_TYPE)
880		return sizeof(struct ipmi_lan_addr);
881
882	return 0;
883}
884EXPORT_SYMBOL(ipmi_addr_length);
885
886static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
887{
888	int rv = 0;
889
890	if (!msg->user) {
891		/* Special handling for NULL users. */
892		if (intf->null_user_handler) {
893			intf->null_user_handler(intf, msg);
894		} else {
895			/* No handler, so give up. */
896			rv = -EINVAL;
897		}
898		ipmi_free_recv_msg(msg);
899	} else if (oops_in_progress) {
900		/*
901		 * If we are running in the panic context, calling the
902		 * receive handler doesn't much meaning and has a deadlock
903		 * risk.  At this moment, simply skip it in that case.
904		 */
905		ipmi_free_recv_msg(msg);
906	} else {
907		int index;
908		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
909
910		if (user) {
911			user->handler->ipmi_recv_hndl(msg, user->handler_data);
912			release_ipmi_user(user, index);
913		} else {
914			/* User went away, give up. */
915			ipmi_free_recv_msg(msg);
916			rv = -EINVAL;
917		}
918	}
919
920	return rv;
921}
922
923static void deliver_local_response(struct ipmi_smi *intf,
924				   struct ipmi_recv_msg *msg)
925{
926	if (deliver_response(intf, msg))
927		ipmi_inc_stat(intf, unhandled_local_responses);
928	else
929		ipmi_inc_stat(intf, handled_local_responses);
930}
931
932static void deliver_err_response(struct ipmi_smi *intf,
933				 struct ipmi_recv_msg *msg, int err)
934{
935	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
936	msg->msg_data[0] = err;
937	msg->msg.netfn |= 1; /* Convert to a response. */
938	msg->msg.data_len = 1;
939	msg->msg.data = msg->msg_data;
940	deliver_local_response(intf, msg);
941}
942
943static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
944{
945	unsigned long iflags;
946
947	if (!intf->handlers->set_need_watch)
948		return;
949
950	spin_lock_irqsave(&intf->watch_lock, iflags);
951	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
952		intf->response_waiters++;
953
954	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
955		intf->watchdog_waiters++;
956
957	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
958		intf->command_waiters++;
959
960	if ((intf->last_watch_mask & flags) != flags) {
961		intf->last_watch_mask |= flags;
962		intf->handlers->set_need_watch(intf->send_info,
963					       intf->last_watch_mask);
964	}
965	spin_unlock_irqrestore(&intf->watch_lock, iflags);
966}
967
968static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
969{
970	unsigned long iflags;
971
972	if (!intf->handlers->set_need_watch)
973		return;
974
975	spin_lock_irqsave(&intf->watch_lock, iflags);
976	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
977		intf->response_waiters--;
978
979	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
980		intf->watchdog_waiters--;
981
982	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
983		intf->command_waiters--;
984
985	flags = 0;
986	if (intf->response_waiters)
987		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
988	if (intf->watchdog_waiters)
989		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
990	if (intf->command_waiters)
991		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
992
993	if (intf->last_watch_mask != flags) {
994		intf->last_watch_mask = flags;
995		intf->handlers->set_need_watch(intf->send_info,
996					       intf->last_watch_mask);
997	}
998	spin_unlock_irqrestore(&intf->watch_lock, iflags);
999}
1000
1001/*
1002 * Find the next sequence number not being used and add the given
1003 * message with the given timeout to the sequence table.  This must be
1004 * called with the interface's seq_lock held.
1005 */
1006static int intf_next_seq(struct ipmi_smi      *intf,
1007			 struct ipmi_recv_msg *recv_msg,
1008			 unsigned long        timeout,
1009			 int                  retries,
1010			 int                  broadcast,
1011			 unsigned char        *seq,
1012			 long                 *seqid)
1013{
1014	int          rv = 0;
1015	unsigned int i;
1016
1017	if (timeout == 0)
1018		timeout = default_retry_ms;
1019	if (retries < 0)
1020		retries = default_max_retries;
1021
1022	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1023					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1024		if (!intf->seq_table[i].inuse)
1025			break;
1026	}
1027
1028	if (!intf->seq_table[i].inuse) {
1029		intf->seq_table[i].recv_msg = recv_msg;
1030
1031		/*
1032		 * Start with the maximum timeout, when the send response
1033		 * comes in we will start the real timer.
1034		 */
1035		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1036		intf->seq_table[i].orig_timeout = timeout;
1037		intf->seq_table[i].retries_left = retries;
1038		intf->seq_table[i].broadcast = broadcast;
1039		intf->seq_table[i].inuse = 1;
1040		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1041		*seq = i;
1042		*seqid = intf->seq_table[i].seqid;
1043		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1044		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1045		need_waiter(intf);
1046	} else {
1047		rv = -EAGAIN;
1048	}
1049
1050	return rv;
1051}
1052
1053/*
1054 * Return the receive message for the given sequence number and
1055 * release the sequence number so it can be reused.  Some other data
1056 * is passed in to be sure the message matches up correctly (to help
1057 * guard against message coming in after their timeout and the
1058 * sequence number being reused).
1059 */
1060static int intf_find_seq(struct ipmi_smi      *intf,
1061			 unsigned char        seq,
1062			 short                channel,
1063			 unsigned char        cmd,
1064			 unsigned char        netfn,
1065			 struct ipmi_addr     *addr,
1066			 struct ipmi_recv_msg **recv_msg)
1067{
1068	int           rv = -ENODEV;
1069	unsigned long flags;
1070
1071	if (seq >= IPMI_IPMB_NUM_SEQ)
1072		return -EINVAL;
1073
1074	spin_lock_irqsave(&intf->seq_lock, flags);
1075	if (intf->seq_table[seq].inuse) {
1076		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1077
1078		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1079				&& (msg->msg.netfn == netfn)
1080				&& (ipmi_addr_equal(addr, &msg->addr))) {
1081			*recv_msg = msg;
1082			intf->seq_table[seq].inuse = 0;
1083			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1084			rv = 0;
1085		}
1086	}
1087	spin_unlock_irqrestore(&intf->seq_lock, flags);
1088
1089	return rv;
1090}
1091
1092
1093/* Start the timer for a specific sequence table entry. */
1094static int intf_start_seq_timer(struct ipmi_smi *intf,
1095				long       msgid)
1096{
1097	int           rv = -ENODEV;
1098	unsigned long flags;
1099	unsigned char seq;
1100	unsigned long seqid;
1101
1102
1103	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1104
1105	spin_lock_irqsave(&intf->seq_lock, flags);
1106	/*
1107	 * We do this verification because the user can be deleted
1108	 * while a message is outstanding.
1109	 */
1110	if ((intf->seq_table[seq].inuse)
1111				&& (intf->seq_table[seq].seqid == seqid)) {
1112		struct seq_table *ent = &intf->seq_table[seq];
1113		ent->timeout = ent->orig_timeout;
1114		rv = 0;
1115	}
1116	spin_unlock_irqrestore(&intf->seq_lock, flags);
1117
1118	return rv;
1119}
1120
1121/* Got an error for the send message for a specific sequence number. */
1122static int intf_err_seq(struct ipmi_smi *intf,
1123			long         msgid,
1124			unsigned int err)
1125{
1126	int                  rv = -ENODEV;
1127	unsigned long        flags;
1128	unsigned char        seq;
1129	unsigned long        seqid;
1130	struct ipmi_recv_msg *msg = NULL;
1131
1132
1133	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1134
1135	spin_lock_irqsave(&intf->seq_lock, flags);
1136	/*
1137	 * We do this verification because the user can be deleted
1138	 * while a message is outstanding.
1139	 */
1140	if ((intf->seq_table[seq].inuse)
1141				&& (intf->seq_table[seq].seqid == seqid)) {
1142		struct seq_table *ent = &intf->seq_table[seq];
1143
1144		ent->inuse = 0;
1145		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1146		msg = ent->recv_msg;
1147		rv = 0;
1148	}
1149	spin_unlock_irqrestore(&intf->seq_lock, flags);
1150
1151	if (msg)
1152		deliver_err_response(intf, msg, err);
1153
1154	return rv;
1155}
1156
1157static void free_user_work(struct work_struct *work)
1158{
1159	struct ipmi_user *user = container_of(work, struct ipmi_user,
1160					      remove_work);
1161
1162	cleanup_srcu_struct(&user->release_barrier);
1163	vfree(user);
1164}
1165
1166int ipmi_create_user(unsigned int          if_num,
1167		     const struct ipmi_user_hndl *handler,
1168		     void                  *handler_data,
1169		     struct ipmi_user      **user)
1170{
1171	unsigned long flags;
1172	struct ipmi_user *new_user;
1173	int           rv, index;
1174	struct ipmi_smi *intf;
1175
1176	/*
1177	 * There is no module usecount here, because it's not
1178	 * required.  Since this can only be used by and called from
1179	 * other modules, they will implicitly use this module, and
1180	 * thus this can't be removed unless the other modules are
1181	 * removed.
1182	 */
1183
1184	if (handler == NULL)
1185		return -EINVAL;
1186
1187	/*
1188	 * Make sure the driver is actually initialized, this handles
1189	 * problems with initialization order.
1190	 */
1191	rv = ipmi_init_msghandler();
1192	if (rv)
1193		return rv;
1194
1195	new_user = vzalloc(sizeof(*new_user));
1196	if (!new_user)
1197		return -ENOMEM;
1198
1199	index = srcu_read_lock(&ipmi_interfaces_srcu);
1200	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1201		if (intf->intf_num == if_num)
1202			goto found;
1203	}
1204	/* Not found, return an error */
1205	rv = -EINVAL;
1206	goto out_kfree;
1207
1208 found:
1209	INIT_WORK(&new_user->remove_work, free_user_work);
1210
1211	rv = init_srcu_struct(&new_user->release_barrier);
1212	if (rv)
1213		goto out_kfree;
1214
1215	if (!try_module_get(intf->owner)) {
1216		rv = -ENODEV;
1217		goto out_kfree;
1218	}
1219
1220	/* Note that each existing user holds a refcount to the interface. */
1221	kref_get(&intf->refcount);
1222
1223	kref_init(&new_user->refcount);
1224	new_user->handler = handler;
1225	new_user->handler_data = handler_data;
1226	new_user->intf = intf;
1227	new_user->gets_events = false;
1228
1229	rcu_assign_pointer(new_user->self, new_user);
1230	spin_lock_irqsave(&intf->seq_lock, flags);
1231	list_add_rcu(&new_user->link, &intf->users);
1232	spin_unlock_irqrestore(&intf->seq_lock, flags);
1233	if (handler->ipmi_watchdog_pretimeout)
1234		/* User wants pretimeouts, so make sure to watch for them. */
1235		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1236	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1237	*user = new_user;
1238	return 0;
1239
1240out_kfree:
1241	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1242	vfree(new_user);
1243	return rv;
1244}
1245EXPORT_SYMBOL(ipmi_create_user);
1246
1247int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1248{
1249	int rv, index;
1250	struct ipmi_smi *intf;
1251
1252	index = srcu_read_lock(&ipmi_interfaces_srcu);
1253	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1254		if (intf->intf_num == if_num)
1255			goto found;
1256	}
1257	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1258
1259	/* Not found, return an error */
1260	return -EINVAL;
1261
1262found:
1263	if (!intf->handlers->get_smi_info)
1264		rv = -ENOTTY;
1265	else
1266		rv = intf->handlers->get_smi_info(intf->send_info, data);
1267	srcu_read_unlock(&ipmi_interfaces_srcu, index);
1268
1269	return rv;
1270}
1271EXPORT_SYMBOL(ipmi_get_smi_info);
1272
1273static void free_user(struct kref *ref)
1274{
1275	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1276
1277	/* SRCU cleanup must happen in task context. */
1278	queue_work(remove_work_wq, &user->remove_work);
1279}
1280
1281static void _ipmi_destroy_user(struct ipmi_user *user)
1282{
1283	struct ipmi_smi  *intf = user->intf;
1284	int              i;
1285	unsigned long    flags;
1286	struct cmd_rcvr  *rcvr;
1287	struct cmd_rcvr  *rcvrs = NULL;
1288	struct module    *owner;
1289
1290	if (!acquire_ipmi_user(user, &i)) {
1291		/*
1292		 * The user has already been cleaned up, just make sure
1293		 * nothing is using it and return.
1294		 */
1295		synchronize_srcu(&user->release_barrier);
1296		return;
1297	}
1298
1299	rcu_assign_pointer(user->self, NULL);
1300	release_ipmi_user(user, i);
1301
1302	synchronize_srcu(&user->release_barrier);
1303
1304	if (user->handler->shutdown)
1305		user->handler->shutdown(user->handler_data);
1306
1307	if (user->handler->ipmi_watchdog_pretimeout)
1308		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1309
1310	if (user->gets_events)
1311		atomic_dec(&intf->event_waiters);
1312
1313	/* Remove the user from the interface's sequence table. */
1314	spin_lock_irqsave(&intf->seq_lock, flags);
1315	list_del_rcu(&user->link);
1316
1317	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1318		if (intf->seq_table[i].inuse
1319		    && (intf->seq_table[i].recv_msg->user == user)) {
1320			intf->seq_table[i].inuse = 0;
1321			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1322			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1323		}
1324	}
1325	spin_unlock_irqrestore(&intf->seq_lock, flags);
1326
1327	/*
1328	 * Remove the user from the command receiver's table.  First
1329	 * we build a list of everything (not using the standard link,
1330	 * since other things may be using it till we do
1331	 * synchronize_srcu()) then free everything in that list.
1332	 */
1333	mutex_lock(&intf->cmd_rcvrs_mutex);
1334	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1335				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1336		if (rcvr->user == user) {
1337			list_del_rcu(&rcvr->link);
1338			rcvr->next = rcvrs;
1339			rcvrs = rcvr;
1340		}
1341	}
1342	mutex_unlock(&intf->cmd_rcvrs_mutex);
1343	synchronize_rcu();
1344	while (rcvrs) {
1345		rcvr = rcvrs;
1346		rcvrs = rcvr->next;
1347		kfree(rcvr);
1348	}
1349
1350	owner = intf->owner;
1351	kref_put(&intf->refcount, intf_free);
1352	module_put(owner);
1353}
1354
1355int ipmi_destroy_user(struct ipmi_user *user)
1356{
1357	_ipmi_destroy_user(user);
1358
1359	kref_put(&user->refcount, free_user);
1360
1361	return 0;
1362}
1363EXPORT_SYMBOL(ipmi_destroy_user);
1364
1365int ipmi_get_version(struct ipmi_user *user,
1366		     unsigned char *major,
1367		     unsigned char *minor)
1368{
1369	struct ipmi_device_id id;
1370	int rv, index;
1371
1372	user = acquire_ipmi_user(user, &index);
1373	if (!user)
1374		return -ENODEV;
1375
1376	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1377	if (!rv) {
1378		*major = ipmi_version_major(&id);
1379		*minor = ipmi_version_minor(&id);
1380	}
1381	release_ipmi_user(user, index);
1382
1383	return rv;
1384}
1385EXPORT_SYMBOL(ipmi_get_version);
1386
1387int ipmi_set_my_address(struct ipmi_user *user,
1388			unsigned int  channel,
1389			unsigned char address)
1390{
1391	int index, rv = 0;
1392
1393	user = acquire_ipmi_user(user, &index);
1394	if (!user)
1395		return -ENODEV;
1396
1397	if (channel >= IPMI_MAX_CHANNELS) {
1398		rv = -EINVAL;
1399	} else {
1400		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1401		user->intf->addrinfo[channel].address = address;
1402	}
1403	release_ipmi_user(user, index);
1404
1405	return rv;
1406}
1407EXPORT_SYMBOL(ipmi_set_my_address);
1408
1409int ipmi_get_my_address(struct ipmi_user *user,
1410			unsigned int  channel,
1411			unsigned char *address)
1412{
1413	int index, rv = 0;
1414
1415	user = acquire_ipmi_user(user, &index);
1416	if (!user)
1417		return -ENODEV;
1418
1419	if (channel >= IPMI_MAX_CHANNELS) {
1420		rv = -EINVAL;
1421	} else {
1422		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1423		*address = user->intf->addrinfo[channel].address;
1424	}
1425	release_ipmi_user(user, index);
1426
1427	return rv;
1428}
1429EXPORT_SYMBOL(ipmi_get_my_address);
1430
1431int ipmi_set_my_LUN(struct ipmi_user *user,
1432		    unsigned int  channel,
1433		    unsigned char LUN)
1434{
1435	int index, rv = 0;
1436
1437	user = acquire_ipmi_user(user, &index);
1438	if (!user)
1439		return -ENODEV;
1440
1441	if (channel >= IPMI_MAX_CHANNELS) {
1442		rv = -EINVAL;
1443	} else {
1444		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1445		user->intf->addrinfo[channel].lun = LUN & 0x3;
1446	}
1447	release_ipmi_user(user, index);
1448
1449	return rv;
1450}
1451EXPORT_SYMBOL(ipmi_set_my_LUN);
1452
1453int ipmi_get_my_LUN(struct ipmi_user *user,
1454		    unsigned int  channel,
1455		    unsigned char *address)
1456{
1457	int index, rv = 0;
1458
1459	user = acquire_ipmi_user(user, &index);
1460	if (!user)
1461		return -ENODEV;
1462
1463	if (channel >= IPMI_MAX_CHANNELS) {
1464		rv = -EINVAL;
1465	} else {
1466		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1467		*address = user->intf->addrinfo[channel].lun;
1468	}
1469	release_ipmi_user(user, index);
1470
1471	return rv;
1472}
1473EXPORT_SYMBOL(ipmi_get_my_LUN);
1474
1475int ipmi_get_maintenance_mode(struct ipmi_user *user)
1476{
1477	int mode, index;
1478	unsigned long flags;
1479
1480	user = acquire_ipmi_user(user, &index);
1481	if (!user)
1482		return -ENODEV;
1483
1484	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1485	mode = user->intf->maintenance_mode;
1486	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1487	release_ipmi_user(user, index);
1488
1489	return mode;
1490}
1491EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1492
1493static void maintenance_mode_update(struct ipmi_smi *intf)
1494{
1495	if (intf->handlers->set_maintenance_mode)
1496		intf->handlers->set_maintenance_mode(
1497			intf->send_info, intf->maintenance_mode_enable);
1498}
1499
1500int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1501{
1502	int rv = 0, index;
1503	unsigned long flags;
1504	struct ipmi_smi *intf = user->intf;
1505
1506	user = acquire_ipmi_user(user, &index);
1507	if (!user)
1508		return -ENODEV;
1509
1510	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1511	if (intf->maintenance_mode != mode) {
1512		switch (mode) {
1513		case IPMI_MAINTENANCE_MODE_AUTO:
1514			intf->maintenance_mode_enable
1515				= (intf->auto_maintenance_timeout > 0);
1516			break;
1517
1518		case IPMI_MAINTENANCE_MODE_OFF:
1519			intf->maintenance_mode_enable = false;
1520			break;
1521
1522		case IPMI_MAINTENANCE_MODE_ON:
1523			intf->maintenance_mode_enable = true;
1524			break;
1525
1526		default:
1527			rv = -EINVAL;
1528			goto out_unlock;
1529		}
1530		intf->maintenance_mode = mode;
1531
1532		maintenance_mode_update(intf);
1533	}
1534 out_unlock:
1535	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1536	release_ipmi_user(user, index);
1537
1538	return rv;
1539}
1540EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1541
1542int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1543{
1544	unsigned long        flags;
1545	struct ipmi_smi      *intf = user->intf;
1546	struct ipmi_recv_msg *msg, *msg2;
1547	struct list_head     msgs;
1548	int index;
1549
1550	user = acquire_ipmi_user(user, &index);
1551	if (!user)
1552		return -ENODEV;
1553
1554	INIT_LIST_HEAD(&msgs);
1555
1556	spin_lock_irqsave(&intf->events_lock, flags);
1557	if (user->gets_events == val)
1558		goto out;
1559
1560	user->gets_events = val;
1561
1562	if (val) {
1563		if (atomic_inc_return(&intf->event_waiters) == 1)
1564			need_waiter(intf);
1565	} else {
1566		atomic_dec(&intf->event_waiters);
1567	}
1568
1569	if (intf->delivering_events)
1570		/*
1571		 * Another thread is delivering events for this, so
1572		 * let it handle any new events.
1573		 */
1574		goto out;
1575
1576	/* Deliver any queued events. */
1577	while (user->gets_events && !list_empty(&intf->waiting_events)) {
1578		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1579			list_move_tail(&msg->link, &msgs);
1580		intf->waiting_events_count = 0;
1581		if (intf->event_msg_printed) {
1582			dev_warn(intf->si_dev, "Event queue no longer full\n");
1583			intf->event_msg_printed = 0;
1584		}
1585
1586		intf->delivering_events = 1;
1587		spin_unlock_irqrestore(&intf->events_lock, flags);
1588
1589		list_for_each_entry_safe(msg, msg2, &msgs, link) {
1590			msg->user = user;
1591			kref_get(&user->refcount);
1592			deliver_local_response(intf, msg);
1593		}
1594
1595		spin_lock_irqsave(&intf->events_lock, flags);
1596		intf->delivering_events = 0;
1597	}
1598
1599 out:
1600	spin_unlock_irqrestore(&intf->events_lock, flags);
1601	release_ipmi_user(user, index);
1602
1603	return 0;
1604}
1605EXPORT_SYMBOL(ipmi_set_gets_events);
1606
1607static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1608				      unsigned char netfn,
1609				      unsigned char cmd,
1610				      unsigned char chan)
1611{
1612	struct cmd_rcvr *rcvr;
1613
1614	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1615				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1616		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1617					&& (rcvr->chans & (1 << chan)))
1618			return rcvr;
1619	}
1620	return NULL;
1621}
1622
1623static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1624				 unsigned char netfn,
1625				 unsigned char cmd,
1626				 unsigned int  chans)
1627{
1628	struct cmd_rcvr *rcvr;
1629
1630	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1631				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1632		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1633					&& (rcvr->chans & chans))
1634			return 0;
1635	}
1636	return 1;
1637}
1638
1639int ipmi_register_for_cmd(struct ipmi_user *user,
1640			  unsigned char netfn,
1641			  unsigned char cmd,
1642			  unsigned int  chans)
1643{
1644	struct ipmi_smi *intf = user->intf;
1645	struct cmd_rcvr *rcvr;
1646	int rv = 0, index;
1647
1648	user = acquire_ipmi_user(user, &index);
1649	if (!user)
1650		return -ENODEV;
1651
1652	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1653	if (!rcvr) {
1654		rv = -ENOMEM;
1655		goto out_release;
1656	}
1657	rcvr->cmd = cmd;
1658	rcvr->netfn = netfn;
1659	rcvr->chans = chans;
1660	rcvr->user = user;
1661
1662	mutex_lock(&intf->cmd_rcvrs_mutex);
1663	/* Make sure the command/netfn is not already registered. */
1664	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1665		rv = -EBUSY;
1666		goto out_unlock;
1667	}
1668
1669	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1670
1671	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1672
1673out_unlock:
1674	mutex_unlock(&intf->cmd_rcvrs_mutex);
1675	if (rv)
1676		kfree(rcvr);
1677out_release:
1678	release_ipmi_user(user, index);
1679
1680	return rv;
1681}
1682EXPORT_SYMBOL(ipmi_register_for_cmd);
1683
1684int ipmi_unregister_for_cmd(struct ipmi_user *user,
1685			    unsigned char netfn,
1686			    unsigned char cmd,
1687			    unsigned int  chans)
1688{
1689	struct ipmi_smi *intf = user->intf;
1690	struct cmd_rcvr *rcvr;
1691	struct cmd_rcvr *rcvrs = NULL;
1692	int i, rv = -ENOENT, index;
1693
1694	user = acquire_ipmi_user(user, &index);
1695	if (!user)
1696		return -ENODEV;
1697
1698	mutex_lock(&intf->cmd_rcvrs_mutex);
1699	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1700		if (((1 << i) & chans) == 0)
1701			continue;
1702		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1703		if (rcvr == NULL)
1704			continue;
1705		if (rcvr->user == user) {
1706			rv = 0;
1707			rcvr->chans &= ~chans;
1708			if (rcvr->chans == 0) {
1709				list_del_rcu(&rcvr->link);
1710				rcvr->next = rcvrs;
1711				rcvrs = rcvr;
1712			}
1713		}
1714	}
1715	mutex_unlock(&intf->cmd_rcvrs_mutex);
1716	synchronize_rcu();
1717	release_ipmi_user(user, index);
1718	while (rcvrs) {
1719		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1720		rcvr = rcvrs;
1721		rcvrs = rcvr->next;
1722		kfree(rcvr);
1723	}
1724
1725	return rv;
1726}
1727EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1728
1729static unsigned char
1730ipmb_checksum(unsigned char *data, int size)
1731{
1732	unsigned char csum = 0;
1733
1734	for (; size > 0; size--, data++)
1735		csum += *data;
1736
1737	return -csum;
1738}
1739
1740static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1741				   struct kernel_ipmi_msg *msg,
1742				   struct ipmi_ipmb_addr *ipmb_addr,
1743				   long                  msgid,
1744				   unsigned char         ipmb_seq,
1745				   int                   broadcast,
1746				   unsigned char         source_address,
1747				   unsigned char         source_lun)
1748{
1749	int i = broadcast;
1750
1751	/* Format the IPMB header data. */
1752	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1753	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1754	smi_msg->data[2] = ipmb_addr->channel;
1755	if (broadcast)
1756		smi_msg->data[3] = 0;
1757	smi_msg->data[i+3] = ipmb_addr->slave_addr;
1758	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1759	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1760	smi_msg->data[i+6] = source_address;
1761	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1762	smi_msg->data[i+8] = msg->cmd;
1763
1764	/* Now tack on the data to the message. */
1765	if (msg->data_len > 0)
1766		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1767	smi_msg->data_size = msg->data_len + 9;
1768
1769	/* Now calculate the checksum and tack it on. */
1770	smi_msg->data[i+smi_msg->data_size]
1771		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1772
1773	/*
1774	 * Add on the checksum size and the offset from the
1775	 * broadcast.
1776	 */
1777	smi_msg->data_size += 1 + i;
1778
1779	smi_msg->msgid = msgid;
1780}
1781
1782static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1783				  struct kernel_ipmi_msg *msg,
1784				  struct ipmi_lan_addr  *lan_addr,
1785				  long                  msgid,
1786				  unsigned char         ipmb_seq,
1787				  unsigned char         source_lun)
1788{
1789	/* Format the IPMB header data. */
1790	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1791	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1792	smi_msg->data[2] = lan_addr->channel;
1793	smi_msg->data[3] = lan_addr->session_handle;
1794	smi_msg->data[4] = lan_addr->remote_SWID;
1795	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1796	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1797	smi_msg->data[7] = lan_addr->local_SWID;
1798	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1799	smi_msg->data[9] = msg->cmd;
1800
1801	/* Now tack on the data to the message. */
1802	if (msg->data_len > 0)
1803		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1804	smi_msg->data_size = msg->data_len + 10;
1805
1806	/* Now calculate the checksum and tack it on. */
1807	smi_msg->data[smi_msg->data_size]
1808		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1809
1810	/*
1811	 * Add on the checksum size and the offset from the
1812	 * broadcast.
1813	 */
1814	smi_msg->data_size += 1;
1815
1816	smi_msg->msgid = msgid;
1817}
1818
1819static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1820					     struct ipmi_smi_msg *smi_msg,
1821					     int priority)
1822{
1823	if (intf->curr_msg) {
1824		if (priority > 0)
1825			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1826		else
1827			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1828		smi_msg = NULL;
1829	} else {
1830		intf->curr_msg = smi_msg;
1831	}
1832
1833	return smi_msg;
1834}
1835
1836static void smi_send(struct ipmi_smi *intf,
1837		     const struct ipmi_smi_handlers *handlers,
1838		     struct ipmi_smi_msg *smi_msg, int priority)
1839{
1840	int run_to_completion = intf->run_to_completion;
1841	unsigned long flags = 0;
1842
1843	if (!run_to_completion)
1844		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1845	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1846
1847	if (!run_to_completion)
1848		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1849
1850	if (smi_msg)
1851		handlers->sender(intf->send_info, smi_msg);
1852}
1853
1854static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1855{
1856	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1857		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1858		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
1859		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1860}
1861
1862static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1863			      struct ipmi_addr       *addr,
1864			      long                   msgid,
1865			      struct kernel_ipmi_msg *msg,
1866			      struct ipmi_smi_msg    *smi_msg,
1867			      struct ipmi_recv_msg   *recv_msg,
1868			      int                    retries,
1869			      unsigned int           retry_time_ms)
1870{
1871	struct ipmi_system_interface_addr *smi_addr;
1872
1873	if (msg->netfn & 1)
1874		/* Responses are not allowed to the SMI. */
1875		return -EINVAL;
1876
1877	smi_addr = (struct ipmi_system_interface_addr *) addr;
1878	if (smi_addr->lun > 3) {
1879		ipmi_inc_stat(intf, sent_invalid_commands);
1880		return -EINVAL;
1881	}
1882
1883	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1884
1885	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1886	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
1887		|| (msg->cmd == IPMI_GET_MSG_CMD)
1888		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1889		/*
1890		 * We don't let the user do these, since we manage
1891		 * the sequence numbers.
1892		 */
1893		ipmi_inc_stat(intf, sent_invalid_commands);
1894		return -EINVAL;
1895	}
1896
1897	if (is_maintenance_mode_cmd(msg)) {
1898		unsigned long flags;
1899
1900		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1901		intf->auto_maintenance_timeout
1902			= maintenance_mode_timeout_ms;
1903		if (!intf->maintenance_mode
1904		    && !intf->maintenance_mode_enable) {
1905			intf->maintenance_mode_enable = true;
1906			maintenance_mode_update(intf);
1907		}
1908		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1909				       flags);
1910	}
1911
1912	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1913		ipmi_inc_stat(intf, sent_invalid_commands);
1914		return -EMSGSIZE;
1915	}
1916
1917	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1918	smi_msg->data[1] = msg->cmd;
1919	smi_msg->msgid = msgid;
1920	smi_msg->user_data = recv_msg;
1921	if (msg->data_len > 0)
1922		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1923	smi_msg->data_size = msg->data_len + 2;
1924	ipmi_inc_stat(intf, sent_local_commands);
1925
1926	return 0;
1927}
1928
1929static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1930			   struct ipmi_addr       *addr,
1931			   long                   msgid,
1932			   struct kernel_ipmi_msg *msg,
1933			   struct ipmi_smi_msg    *smi_msg,
1934			   struct ipmi_recv_msg   *recv_msg,
1935			   unsigned char          source_address,
1936			   unsigned char          source_lun,
1937			   int                    retries,
1938			   unsigned int           retry_time_ms)
1939{
1940	struct ipmi_ipmb_addr *ipmb_addr;
1941	unsigned char ipmb_seq;
1942	long seqid;
1943	int broadcast = 0;
1944	struct ipmi_channel *chans;
1945	int rv = 0;
1946
1947	if (addr->channel >= IPMI_MAX_CHANNELS) {
1948		ipmi_inc_stat(intf, sent_invalid_commands);
1949		return -EINVAL;
1950	}
1951
1952	chans = READ_ONCE(intf->channel_list)->c;
1953
1954	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1955		ipmi_inc_stat(intf, sent_invalid_commands);
1956		return -EINVAL;
1957	}
1958
1959	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1960		/*
1961		 * Broadcasts add a zero at the beginning of the
1962		 * message, but otherwise is the same as an IPMB
1963		 * address.
1964		 */
1965		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1966		broadcast = 1;
1967		retries = 0; /* Don't retry broadcasts. */
1968	}
1969
1970	/*
1971	 * 9 for the header and 1 for the checksum, plus
1972	 * possibly one for the broadcast.
1973	 */
1974	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1975		ipmi_inc_stat(intf, sent_invalid_commands);
1976		return -EMSGSIZE;
1977	}
1978
1979	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1980	if (ipmb_addr->lun > 3) {
1981		ipmi_inc_stat(intf, sent_invalid_commands);
1982		return -EINVAL;
1983	}
1984
1985	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1986
1987	if (recv_msg->msg.netfn & 0x1) {
1988		/*
1989		 * It's a response, so use the user's sequence
1990		 * from msgid.
1991		 */
1992		ipmi_inc_stat(intf, sent_ipmb_responses);
1993		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1994				msgid, broadcast,
1995				source_address, source_lun);
1996
1997		/*
1998		 * Save the receive message so we can use it
1999		 * to deliver the response.
2000		 */
2001		smi_msg->user_data = recv_msg;
2002	} else {
2003		/* It's a command, so get a sequence for it. */
2004		unsigned long flags;
2005
2006		spin_lock_irqsave(&intf->seq_lock, flags);
2007
2008		if (is_maintenance_mode_cmd(msg))
2009			intf->ipmb_maintenance_mode_timeout =
2010				maintenance_mode_timeout_ms;
2011
2012		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2013			/* Different default in maintenance mode */
2014			retry_time_ms = default_maintenance_retry_ms;
2015
2016		/*
2017		 * Create a sequence number with a 1 second
2018		 * timeout and 4 retries.
2019		 */
2020		rv = intf_next_seq(intf,
2021				   recv_msg,
2022				   retry_time_ms,
2023				   retries,
2024				   broadcast,
2025				   &ipmb_seq,
2026				   &seqid);
2027		if (rv)
2028			/*
2029			 * We have used up all the sequence numbers,
2030			 * probably, so abort.
2031			 */
2032			goto out_err;
2033
2034		ipmi_inc_stat(intf, sent_ipmb_commands);
2035
2036		/*
2037		 * Store the sequence number in the message,
2038		 * so that when the send message response
2039		 * comes back we can start the timer.
2040		 */
2041		format_ipmb_msg(smi_msg, msg, ipmb_addr,
2042				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2043				ipmb_seq, broadcast,
2044				source_address, source_lun);
2045
2046		/*
2047		 * Copy the message into the recv message data, so we
2048		 * can retransmit it later if necessary.
2049		 */
2050		memcpy(recv_msg->msg_data, smi_msg->data,
2051		       smi_msg->data_size);
2052		recv_msg->msg.data = recv_msg->msg_data;
2053		recv_msg->msg.data_len = smi_msg->data_size;
2054
2055		/*
2056		 * We don't unlock until here, because we need
2057		 * to copy the completed message into the
2058		 * recv_msg before we release the lock.
2059		 * Otherwise, race conditions may bite us.  I
2060		 * know that's pretty paranoid, but I prefer
2061		 * to be correct.
2062		 */
2063out_err:
2064		spin_unlock_irqrestore(&intf->seq_lock, flags);
2065	}
2066
2067	return rv;
2068}
2069
2070static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2071			  struct ipmi_addr       *addr,
2072			  long                   msgid,
2073			  struct kernel_ipmi_msg *msg,
2074			  struct ipmi_smi_msg    *smi_msg,
2075			  struct ipmi_recv_msg   *recv_msg,
2076			  unsigned char          source_lun,
2077			  int                    retries,
2078			  unsigned int           retry_time_ms)
2079{
2080	struct ipmi_lan_addr  *lan_addr;
2081	unsigned char ipmb_seq;
2082	long seqid;
2083	struct ipmi_channel *chans;
2084	int rv = 0;
2085
2086	if (addr->channel >= IPMI_MAX_CHANNELS) {
2087		ipmi_inc_stat(intf, sent_invalid_commands);
2088		return -EINVAL;
2089	}
2090
2091	chans = READ_ONCE(intf->channel_list)->c;
2092
2093	if ((chans[addr->channel].medium
2094				!= IPMI_CHANNEL_MEDIUM_8023LAN)
2095			&& (chans[addr->channel].medium
2096			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2097		ipmi_inc_stat(intf, sent_invalid_commands);
2098		return -EINVAL;
2099	}
2100
2101	/* 11 for the header and 1 for the checksum. */
2102	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2103		ipmi_inc_stat(intf, sent_invalid_commands);
2104		return -EMSGSIZE;
2105	}
2106
2107	lan_addr = (struct ipmi_lan_addr *) addr;
2108	if (lan_addr->lun > 3) {
2109		ipmi_inc_stat(intf, sent_invalid_commands);
2110		return -EINVAL;
2111	}
2112
2113	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2114
2115	if (recv_msg->msg.netfn & 0x1) {
2116		/*
2117		 * It's a response, so use the user's sequence
2118		 * from msgid.
2119		 */
2120		ipmi_inc_stat(intf, sent_lan_responses);
2121		format_lan_msg(smi_msg, msg, lan_addr, msgid,
2122			       msgid, source_lun);
2123
2124		/*
2125		 * Save the receive message so we can use it
2126		 * to deliver the response.
2127		 */
2128		smi_msg->user_data = recv_msg;
2129	} else {
2130		/* It's a command, so get a sequence for it. */
2131		unsigned long flags;
2132
2133		spin_lock_irqsave(&intf->seq_lock, flags);
2134
2135		/*
2136		 * Create a sequence number with a 1 second
2137		 * timeout and 4 retries.
2138		 */
2139		rv = intf_next_seq(intf,
2140				   recv_msg,
2141				   retry_time_ms,
2142				   retries,
2143				   0,
2144				   &ipmb_seq,
2145				   &seqid);
2146		if (rv)
2147			/*
2148			 * We have used up all the sequence numbers,
2149			 * probably, so abort.
2150			 */
2151			goto out_err;
2152
2153		ipmi_inc_stat(intf, sent_lan_commands);
2154
2155		/*
2156		 * Store the sequence number in the message,
2157		 * so that when the send message response
2158		 * comes back we can start the timer.
2159		 */
2160		format_lan_msg(smi_msg, msg, lan_addr,
2161			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2162			       ipmb_seq, source_lun);
2163
2164		/*
2165		 * Copy the message into the recv message data, so we
2166		 * can retransmit it later if necessary.
2167		 */
2168		memcpy(recv_msg->msg_data, smi_msg->data,
2169		       smi_msg->data_size);
2170		recv_msg->msg.data = recv_msg->msg_data;
2171		recv_msg->msg.data_len = smi_msg->data_size;
2172
2173		/*
2174		 * We don't unlock until here, because we need
2175		 * to copy the completed message into the
2176		 * recv_msg before we release the lock.
2177		 * Otherwise, race conditions may bite us.  I
2178		 * know that's pretty paranoid, but I prefer
2179		 * to be correct.
2180		 */
2181out_err:
2182		spin_unlock_irqrestore(&intf->seq_lock, flags);
2183	}
2184
2185	return rv;
2186}
2187
2188/*
2189 * Separate from ipmi_request so that the user does not have to be
2190 * supplied in certain circumstances (mainly at panic time).  If
2191 * messages are supplied, they will be freed, even if an error
2192 * occurs.
2193 */
2194static int i_ipmi_request(struct ipmi_user     *user,
2195			  struct ipmi_smi      *intf,
2196			  struct ipmi_addr     *addr,
2197			  long                 msgid,
2198			  struct kernel_ipmi_msg *msg,
2199			  void                 *user_msg_data,
2200			  void                 *supplied_smi,
2201			  struct ipmi_recv_msg *supplied_recv,
2202			  int                  priority,
2203			  unsigned char        source_address,
2204			  unsigned char        source_lun,
2205			  int                  retries,
2206			  unsigned int         retry_time_ms)
2207{
2208	struct ipmi_smi_msg *smi_msg;
2209	struct ipmi_recv_msg *recv_msg;
2210	int rv = 0;
2211
2212	if (supplied_recv)
2213		recv_msg = supplied_recv;
2214	else {
2215		recv_msg = ipmi_alloc_recv_msg();
2216		if (recv_msg == NULL) {
2217			rv = -ENOMEM;
2218			goto out;
2219		}
2220	}
2221	recv_msg->user_msg_data = user_msg_data;
2222
2223	if (supplied_smi)
2224		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2225	else {
2226		smi_msg = ipmi_alloc_smi_msg();
2227		if (smi_msg == NULL) {
2228			if (!supplied_recv)
2229				ipmi_free_recv_msg(recv_msg);
2230			rv = -ENOMEM;
2231			goto out;
2232		}
2233	}
2234
2235	rcu_read_lock();
2236	if (intf->in_shutdown) {
2237		rv = -ENODEV;
2238		goto out_err;
2239	}
2240
2241	recv_msg->user = user;
2242	if (user)
2243		/* The put happens when the message is freed. */
2244		kref_get(&user->refcount);
2245	recv_msg->msgid = msgid;
2246	/*
2247	 * Store the message to send in the receive message so timeout
2248	 * responses can get the proper response data.
2249	 */
2250	recv_msg->msg = *msg;
2251
2252	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2253		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2254					recv_msg, retries, retry_time_ms);
2255	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2256		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2257				     source_address, source_lun,
2258				     retries, retry_time_ms);
2259	} else if (is_lan_addr(addr)) {
2260		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2261				    source_lun, retries, retry_time_ms);
2262	} else {
2263	    /* Unknown address type. */
2264		ipmi_inc_stat(intf, sent_invalid_commands);
2265		rv = -EINVAL;
2266	}
2267
2268	if (rv) {
2269out_err:
2270		ipmi_free_smi_msg(smi_msg);
2271		ipmi_free_recv_msg(recv_msg);
2272	} else {
2273		pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
2274
2275		smi_send(intf, intf->handlers, smi_msg, priority);
2276	}
2277	rcu_read_unlock();
2278
2279out:
2280	return rv;
2281}
2282
2283static int check_addr(struct ipmi_smi  *intf,
2284		      struct ipmi_addr *addr,
2285		      unsigned char    *saddr,
2286		      unsigned char    *lun)
2287{
2288	if (addr->channel >= IPMI_MAX_CHANNELS)
2289		return -EINVAL;
2290	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2291	*lun = intf->addrinfo[addr->channel].lun;
2292	*saddr = intf->addrinfo[addr->channel].address;
2293	return 0;
2294}
2295
2296int ipmi_request_settime(struct ipmi_user *user,
2297			 struct ipmi_addr *addr,
2298			 long             msgid,
2299			 struct kernel_ipmi_msg  *msg,
2300			 void             *user_msg_data,
2301			 int              priority,
2302			 int              retries,
2303			 unsigned int     retry_time_ms)
2304{
2305	unsigned char saddr = 0, lun = 0;
2306	int rv, index;
2307
2308	if (!user)
2309		return -EINVAL;
2310
2311	user = acquire_ipmi_user(user, &index);
2312	if (!user)
2313		return -ENODEV;
2314
2315	rv = check_addr(user->intf, addr, &saddr, &lun);
2316	if (!rv)
2317		rv = i_ipmi_request(user,
2318				    user->intf,
2319				    addr,
2320				    msgid,
2321				    msg,
2322				    user_msg_data,
2323				    NULL, NULL,
2324				    priority,
2325				    saddr,
2326				    lun,
2327				    retries,
2328				    retry_time_ms);
2329
2330	release_ipmi_user(user, index);
2331	return rv;
2332}
2333EXPORT_SYMBOL(ipmi_request_settime);
2334
2335int ipmi_request_supply_msgs(struct ipmi_user     *user,
2336			     struct ipmi_addr     *addr,
2337			     long                 msgid,
2338			     struct kernel_ipmi_msg *msg,
2339			     void                 *user_msg_data,
2340			     void                 *supplied_smi,
2341			     struct ipmi_recv_msg *supplied_recv,
2342			     int                  priority)
2343{
2344	unsigned char saddr = 0, lun = 0;
2345	int rv, index;
2346
2347	if (!user)
2348		return -EINVAL;
2349
2350	user = acquire_ipmi_user(user, &index);
2351	if (!user)
2352		return -ENODEV;
2353
2354	rv = check_addr(user->intf, addr, &saddr, &lun);
2355	if (!rv)
2356		rv = i_ipmi_request(user,
2357				    user->intf,
2358				    addr,
2359				    msgid,
2360				    msg,
2361				    user_msg_data,
2362				    supplied_smi,
2363				    supplied_recv,
2364				    priority,
2365				    saddr,
2366				    lun,
2367				    -1, 0);
2368
2369	release_ipmi_user(user, index);
2370	return rv;
2371}
2372EXPORT_SYMBOL(ipmi_request_supply_msgs);
2373
2374static void bmc_device_id_handler(struct ipmi_smi *intf,
2375				  struct ipmi_recv_msg *msg)
2376{
2377	int rv;
2378
2379	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2380			|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2381			|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2382		dev_warn(intf->si_dev,
2383			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2384			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2385		return;
2386	}
2387
2388	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2389			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2390	if (rv) {
2391		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2392		/* record completion code when error */
2393		intf->bmc->cc = msg->msg.data[0];
2394		intf->bmc->dyn_id_set = 0;
2395	} else {
2396		/*
2397		 * Make sure the id data is available before setting
2398		 * dyn_id_set.
2399		 */
2400		smp_wmb();
2401		intf->bmc->dyn_id_set = 1;
2402	}
2403
2404	wake_up(&intf->waitq);
2405}
2406
2407static int
2408send_get_device_id_cmd(struct ipmi_smi *intf)
2409{
2410	struct ipmi_system_interface_addr si;
2411	struct kernel_ipmi_msg msg;
2412
2413	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2414	si.channel = IPMI_BMC_CHANNEL;
2415	si.lun = 0;
2416
2417	msg.netfn = IPMI_NETFN_APP_REQUEST;
2418	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2419	msg.data = NULL;
2420	msg.data_len = 0;
2421
2422	return i_ipmi_request(NULL,
2423			      intf,
2424			      (struct ipmi_addr *) &si,
2425			      0,
2426			      &msg,
2427			      intf,
2428			      NULL,
2429			      NULL,
2430			      0,
2431			      intf->addrinfo[0].address,
2432			      intf->addrinfo[0].lun,
2433			      -1, 0);
2434}
2435
2436static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2437{
2438	int rv;
2439	unsigned int retry_count = 0;
2440
2441	intf->null_user_handler = bmc_device_id_handler;
2442
2443retry:
2444	bmc->cc = 0;
2445	bmc->dyn_id_set = 2;
2446
2447	rv = send_get_device_id_cmd(intf);
2448	if (rv)
2449		goto out_reset_handler;
2450
2451	wait_event(intf->waitq, bmc->dyn_id_set != 2);
2452
2453	if (!bmc->dyn_id_set) {
2454		if ((bmc->cc == IPMI_DEVICE_IN_FW_UPDATE_ERR
2455		     || bmc->cc ==  IPMI_DEVICE_IN_INIT_ERR
2456		     || bmc->cc ==  IPMI_NOT_IN_MY_STATE_ERR)
2457		     && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2458			msleep(500);
2459			dev_warn(intf->si_dev,
2460			    "BMC returned 0x%2.2x, retry get bmc device id\n",
2461			    bmc->cc);
2462			goto retry;
2463		}
2464
2465		rv = -EIO; /* Something went wrong in the fetch. */
2466	}
2467
2468	/* dyn_id_set makes the id data available. */
2469	smp_rmb();
2470
2471out_reset_handler:
2472	intf->null_user_handler = NULL;
2473
2474	return rv;
2475}
2476
2477/*
2478 * Fetch the device id for the bmc/interface.  You must pass in either
2479 * bmc or intf, this code will get the other one.  If the data has
2480 * been recently fetched, this will just use the cached data.  Otherwise
2481 * it will run a new fetch.
2482 *
2483 * Except for the first time this is called (in ipmi_add_smi()),
2484 * this will always return good data;
2485 */
2486static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2487			       struct ipmi_device_id *id,
2488			       bool *guid_set, guid_t *guid, int intf_num)
2489{
2490	int rv = 0;
2491	int prev_dyn_id_set, prev_guid_set;
2492	bool intf_set = intf != NULL;
2493
2494	if (!intf) {
2495		mutex_lock(&bmc->dyn_mutex);
2496retry_bmc_lock:
2497		if (list_empty(&bmc->intfs)) {
2498			mutex_unlock(&bmc->dyn_mutex);
2499			return -ENOENT;
2500		}
2501		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2502					bmc_link);
2503		kref_get(&intf->refcount);
2504		mutex_unlock(&bmc->dyn_mutex);
2505		mutex_lock(&intf->bmc_reg_mutex);
2506		mutex_lock(&bmc->dyn_mutex);
2507		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2508					     bmc_link)) {
2509			mutex_unlock(&intf->bmc_reg_mutex);
2510			kref_put(&intf->refcount, intf_free);
2511			goto retry_bmc_lock;
2512		}
2513	} else {
2514		mutex_lock(&intf->bmc_reg_mutex);
2515		bmc = intf->bmc;
2516		mutex_lock(&bmc->dyn_mutex);
2517		kref_get(&intf->refcount);
2518	}
2519
2520	/* If we have a valid and current ID, just return that. */
2521	if (intf->in_bmc_register ||
2522	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2523		goto out_noprocessing;
2524
2525	prev_guid_set = bmc->dyn_guid_set;
2526	__get_guid(intf);
2527
2528	prev_dyn_id_set = bmc->dyn_id_set;
2529	rv = __get_device_id(intf, bmc);
2530	if (rv)
2531		goto out;
2532
2533	/*
2534	 * The guid, device id, manufacturer id, and product id should
2535	 * not change on a BMC.  If it does we have to do some dancing.
2536	 */
2537	if (!intf->bmc_registered
2538	    || (!prev_guid_set && bmc->dyn_guid_set)
2539	    || (!prev_dyn_id_set && bmc->dyn_id_set)
2540	    || (prev_guid_set && bmc->dyn_guid_set
2541		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
2542	    || bmc->id.device_id != bmc->fetch_id.device_id
2543	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2544	    || bmc->id.product_id != bmc->fetch_id.product_id) {
2545		struct ipmi_device_id id = bmc->fetch_id;
2546		int guid_set = bmc->dyn_guid_set;
2547		guid_t guid;
2548
2549		guid = bmc->fetch_guid;
2550		mutex_unlock(&bmc->dyn_mutex);
2551
2552		__ipmi_bmc_unregister(intf);
2553		/* Fill in the temporary BMC for good measure. */
2554		intf->bmc->id = id;
2555		intf->bmc->dyn_guid_set = guid_set;
2556		intf->bmc->guid = guid;
2557		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2558			need_waiter(intf); /* Retry later on an error. */
2559		else
2560			__scan_channels(intf, &id);
2561
2562
2563		if (!intf_set) {
2564			/*
2565			 * We weren't given the interface on the
2566			 * command line, so restart the operation on
2567			 * the next interface for the BMC.
2568			 */
2569			mutex_unlock(&intf->bmc_reg_mutex);
2570			mutex_lock(&bmc->dyn_mutex);
2571			goto retry_bmc_lock;
2572		}
2573
2574		/* We have a new BMC, set it up. */
2575		bmc = intf->bmc;
2576		mutex_lock(&bmc->dyn_mutex);
2577		goto out_noprocessing;
2578	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2579		/* Version info changes, scan the channels again. */
2580		__scan_channels(intf, &bmc->fetch_id);
2581
2582	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2583
2584out:
2585	if (rv && prev_dyn_id_set) {
2586		rv = 0; /* Ignore failures if we have previous data. */
2587		bmc->dyn_id_set = prev_dyn_id_set;
2588	}
2589	if (!rv) {
2590		bmc->id = bmc->fetch_id;
2591		if (bmc->dyn_guid_set)
2592			bmc->guid = bmc->fetch_guid;
2593		else if (prev_guid_set)
2594			/*
2595			 * The guid used to be valid and it failed to fetch,
2596			 * just use the cached value.
2597			 */
2598			bmc->dyn_guid_set = prev_guid_set;
2599	}
2600out_noprocessing:
2601	if (!rv) {
2602		if (id)
2603			*id = bmc->id;
2604
2605		if (guid_set)
2606			*guid_set = bmc->dyn_guid_set;
2607
2608		if (guid && bmc->dyn_guid_set)
2609			*guid =  bmc->guid;
2610	}
2611
2612	mutex_unlock(&bmc->dyn_mutex);
2613	mutex_unlock(&intf->bmc_reg_mutex);
2614
2615	kref_put(&intf->refcount, intf_free);
2616	return rv;
2617}
2618
2619static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2620			     struct ipmi_device_id *id,
2621			     bool *guid_set, guid_t *guid)
2622{
2623	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2624}
2625
2626static ssize_t device_id_show(struct device *dev,
2627			      struct device_attribute *attr,
2628			      char *buf)
2629{
2630	struct bmc_device *bmc = to_bmc_device(dev);
2631	struct ipmi_device_id id;
2632	int rv;
2633
2634	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2635	if (rv)
2636		return rv;
2637
2638	return snprintf(buf, 10, "%u\n", id.device_id);
2639}
2640static DEVICE_ATTR_RO(device_id);
2641
2642static ssize_t provides_device_sdrs_show(struct device *dev,
2643					 struct device_attribute *attr,
2644					 char *buf)
2645{
2646	struct bmc_device *bmc = to_bmc_device(dev);
2647	struct ipmi_device_id id;
2648	int rv;
2649
2650	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2651	if (rv)
2652		return rv;
2653
2654	return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2655}
2656static DEVICE_ATTR_RO(provides_device_sdrs);
2657
2658static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2659			     char *buf)
2660{
2661	struct bmc_device *bmc = to_bmc_device(dev);
2662	struct ipmi_device_id id;
2663	int rv;
2664
2665	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2666	if (rv)
2667		return rv;
2668
2669	return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2670}
2671static DEVICE_ATTR_RO(revision);
2672
2673static ssize_t firmware_revision_show(struct device *dev,
2674				      struct device_attribute *attr,
2675				      char *buf)
2676{
2677	struct bmc_device *bmc = to_bmc_device(dev);
2678	struct ipmi_device_id id;
2679	int rv;
2680
2681	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2682	if (rv)
2683		return rv;
2684
2685	return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2686			id.firmware_revision_2);
2687}
2688static DEVICE_ATTR_RO(firmware_revision);
2689
2690static ssize_t ipmi_version_show(struct device *dev,
2691				 struct device_attribute *attr,
2692				 char *buf)
2693{
2694	struct bmc_device *bmc = to_bmc_device(dev);
2695	struct ipmi_device_id id;
2696	int rv;
2697
2698	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2699	if (rv)
2700		return rv;
2701
2702	return snprintf(buf, 20, "%u.%u\n",
2703			ipmi_version_major(&id),
2704			ipmi_version_minor(&id));
2705}
2706static DEVICE_ATTR_RO(ipmi_version);
2707
2708static ssize_t add_dev_support_show(struct device *dev,
2709				    struct device_attribute *attr,
2710				    char *buf)
2711{
2712	struct bmc_device *bmc = to_bmc_device(dev);
2713	struct ipmi_device_id id;
2714	int rv;
2715
2716	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2717	if (rv)
2718		return rv;
2719
2720	return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2721}
2722static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2723		   NULL);
2724
2725static ssize_t manufacturer_id_show(struct device *dev,
2726				    struct device_attribute *attr,
2727				    char *buf)
2728{
2729	struct bmc_device *bmc = to_bmc_device(dev);
2730	struct ipmi_device_id id;
2731	int rv;
2732
2733	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2734	if (rv)
2735		return rv;
2736
2737	return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2738}
2739static DEVICE_ATTR_RO(manufacturer_id);
2740
2741static ssize_t product_id_show(struct device *dev,
2742			       struct device_attribute *attr,
2743			       char *buf)
2744{
2745	struct bmc_device *bmc = to_bmc_device(dev);
2746	struct ipmi_device_id id;
2747	int rv;
2748
2749	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2750	if (rv)
2751		return rv;
2752
2753	return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2754}
2755static DEVICE_ATTR_RO(product_id);
2756
2757static ssize_t aux_firmware_rev_show(struct device *dev,
2758				     struct device_attribute *attr,
2759				     char *buf)
2760{
2761	struct bmc_device *bmc = to_bmc_device(dev);
2762	struct ipmi_device_id id;
2763	int rv;
2764
2765	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2766	if (rv)
2767		return rv;
2768
2769	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2770			id.aux_firmware_revision[3],
2771			id.aux_firmware_revision[2],
2772			id.aux_firmware_revision[1],
2773			id.aux_firmware_revision[0]);
2774}
2775static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2776
2777static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2778			 char *buf)
2779{
2780	struct bmc_device *bmc = to_bmc_device(dev);
2781	bool guid_set;
2782	guid_t guid;
2783	int rv;
2784
2785	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2786	if (rv)
2787		return rv;
2788	if (!guid_set)
2789		return -ENOENT;
2790
2791	return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
2792}
2793static DEVICE_ATTR_RO(guid);
2794
2795static struct attribute *bmc_dev_attrs[] = {
2796	&dev_attr_device_id.attr,
2797	&dev_attr_provides_device_sdrs.attr,
2798	&dev_attr_revision.attr,
2799	&dev_attr_firmware_revision.attr,
2800	&dev_attr_ipmi_version.attr,
2801	&dev_attr_additional_device_support.attr,
2802	&dev_attr_manufacturer_id.attr,
2803	&dev_attr_product_id.attr,
2804	&dev_attr_aux_firmware_revision.attr,
2805	&dev_attr_guid.attr,
2806	NULL
2807};
2808
2809static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2810				       struct attribute *attr, int idx)
2811{
2812	struct device *dev = kobj_to_dev(kobj);
2813	struct bmc_device *bmc = to_bmc_device(dev);
2814	umode_t mode = attr->mode;
2815	int rv;
2816
2817	if (attr == &dev_attr_aux_firmware_revision.attr) {
2818		struct ipmi_device_id id;
2819
2820		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2821		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2822	}
2823	if (attr == &dev_attr_guid.attr) {
2824		bool guid_set;
2825
2826		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2827		return (!rv && guid_set) ? mode : 0;
2828	}
2829	return mode;
2830}
2831
2832static const struct attribute_group bmc_dev_attr_group = {
2833	.attrs		= bmc_dev_attrs,
2834	.is_visible	= bmc_dev_attr_is_visible,
2835};
2836
2837static const struct attribute_group *bmc_dev_attr_groups[] = {
2838	&bmc_dev_attr_group,
2839	NULL
2840};
2841
2842static const struct device_type bmc_device_type = {
2843	.groups		= bmc_dev_attr_groups,
2844};
2845
2846static int __find_bmc_guid(struct device *dev, const void *data)
2847{
2848	const guid_t *guid = data;
2849	struct bmc_device *bmc;
2850	int rv;
2851
2852	if (dev->type != &bmc_device_type)
2853		return 0;
2854
2855	bmc = to_bmc_device(dev);
2856	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2857	if (rv)
2858		rv = kref_get_unless_zero(&bmc->usecount);
2859	return rv;
2860}
2861
2862/*
2863 * Returns with the bmc's usecount incremented, if it is non-NULL.
2864 */
2865static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2866					     guid_t *guid)
2867{
2868	struct device *dev;
2869	struct bmc_device *bmc = NULL;
2870
2871	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2872	if (dev) {
2873		bmc = to_bmc_device(dev);
2874		put_device(dev);
2875	}
2876	return bmc;
2877}
2878
2879struct prod_dev_id {
2880	unsigned int  product_id;
2881	unsigned char device_id;
2882};
2883
2884static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
2885{
2886	const struct prod_dev_id *cid = data;
2887	struct bmc_device *bmc;
2888	int rv;
2889
2890	if (dev->type != &bmc_device_type)
2891		return 0;
2892
2893	bmc = to_bmc_device(dev);
2894	rv = (bmc->id.product_id == cid->product_id
2895	      && bmc->id.device_id == cid->device_id);
2896	if (rv)
2897		rv = kref_get_unless_zero(&bmc->usecount);
2898	return rv;
2899}
2900
2901/*
2902 * Returns with the bmc's usecount incremented, if it is non-NULL.
2903 */
2904static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2905	struct device_driver *drv,
2906	unsigned int product_id, unsigned char device_id)
2907{
2908	struct prod_dev_id id = {
2909		.product_id = product_id,
2910		.device_id = device_id,
2911	};
2912	struct device *dev;
2913	struct bmc_device *bmc = NULL;
2914
2915	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2916	if (dev) {
2917		bmc = to_bmc_device(dev);
2918		put_device(dev);
2919	}
2920	return bmc;
2921}
2922
2923static DEFINE_IDA(ipmi_bmc_ida);
2924
2925static void
2926release_bmc_device(struct device *dev)
2927{
2928	kfree(to_bmc_device(dev));
2929}
2930
2931static void cleanup_bmc_work(struct work_struct *work)
2932{
2933	struct bmc_device *bmc = container_of(work, struct bmc_device,
2934					      remove_work);
2935	int id = bmc->pdev.id; /* Unregister overwrites id */
2936
2937	platform_device_unregister(&bmc->pdev);
2938	ida_simple_remove(&ipmi_bmc_ida, id);
2939}
2940
2941static void
2942cleanup_bmc_device(struct kref *ref)
2943{
2944	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2945
2946	/*
2947	 * Remove the platform device in a work queue to avoid issues
2948	 * with removing the device attributes while reading a device
2949	 * attribute.
2950	 */
2951	queue_work(remove_work_wq, &bmc->remove_work);
2952}
2953
2954/*
2955 * Must be called with intf->bmc_reg_mutex held.
2956 */
2957static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2958{
2959	struct bmc_device *bmc = intf->bmc;
2960
2961	if (!intf->bmc_registered)
2962		return;
2963
2964	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2965	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2966	kfree(intf->my_dev_name);
2967	intf->my_dev_name = NULL;
2968
2969	mutex_lock(&bmc->dyn_mutex);
2970	list_del(&intf->bmc_link);
2971	mutex_unlock(&bmc->dyn_mutex);
2972	intf->bmc = &intf->tmp_bmc;
2973	kref_put(&bmc->usecount, cleanup_bmc_device);
2974	intf->bmc_registered = false;
2975}
2976
2977static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2978{
2979	mutex_lock(&intf->bmc_reg_mutex);
2980	__ipmi_bmc_unregister(intf);
2981	mutex_unlock(&intf->bmc_reg_mutex);
2982}
2983
2984/*
2985 * Must be called with intf->bmc_reg_mutex held.
2986 */
2987static int __ipmi_bmc_register(struct ipmi_smi *intf,
2988			       struct ipmi_device_id *id,
2989			       bool guid_set, guid_t *guid, int intf_num)
2990{
2991	int               rv;
2992	struct bmc_device *bmc;
2993	struct bmc_device *old_bmc;
2994
2995	/*
2996	 * platform_device_register() can cause bmc_reg_mutex to
2997	 * be claimed because of the is_visible functions of
2998	 * the attributes.  Eliminate possible recursion and
2999	 * release the lock.
3000	 */
3001	intf->in_bmc_register = true;
3002	mutex_unlock(&intf->bmc_reg_mutex);
3003
3004	/*
3005	 * Try to find if there is an bmc_device struct
3006	 * representing the interfaced BMC already
3007	 */
3008	mutex_lock(&ipmidriver_mutex);
3009	if (guid_set)
3010		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3011	else
3012		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3013						    id->product_id,
3014						    id->device_id);
3015
3016	/*
3017	 * If there is already an bmc_device, free the new one,
3018	 * otherwise register the new BMC device
3019	 */
3020	if (old_bmc) {
3021		bmc = old_bmc;
3022		/*
3023		 * Note: old_bmc already has usecount incremented by
3024		 * the BMC find functions.
3025		 */
3026		intf->bmc = old_bmc;
3027		mutex_lock(&bmc->dyn_mutex);
3028		list_add_tail(&intf->bmc_link, &bmc->intfs);
3029		mutex_unlock(&bmc->dyn_mutex);
3030
3031		dev_info(intf->si_dev,
3032			 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3033			 bmc->id.manufacturer_id,
3034			 bmc->id.product_id,
3035			 bmc->id.device_id);
3036	} else {
3037		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3038		if (!bmc) {
3039			rv = -ENOMEM;
3040			goto out;
3041		}
3042		INIT_LIST_HEAD(&bmc->intfs);
3043		mutex_init(&bmc->dyn_mutex);
3044		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3045
3046		bmc->id = *id;
3047		bmc->dyn_id_set = 1;
3048		bmc->dyn_guid_set = guid_set;
3049		bmc->guid = *guid;
3050		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3051
3052		bmc->pdev.name = "ipmi_bmc";
3053
3054		rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
3055		if (rv < 0) {
3056			kfree(bmc);
3057			goto out;
3058		}
3059
3060		bmc->pdev.dev.driver = &ipmidriver.driver;
3061		bmc->pdev.id = rv;
3062		bmc->pdev.dev.release = release_bmc_device;
3063		bmc->pdev.dev.type = &bmc_device_type;
3064		kref_init(&bmc->usecount);
3065
3066		intf->bmc = bmc;
3067		mutex_lock(&bmc->dyn_mutex);
3068		list_add_tail(&intf->bmc_link, &bmc->intfs);
3069		mutex_unlock(&bmc->dyn_mutex);
3070
3071		rv = platform_device_register(&bmc->pdev);
3072		if (rv) {
3073			dev_err(intf->si_dev,
3074				"Unable to register bmc device: %d\n",
3075				rv);
3076			goto out_list_del;
3077		}
3078
3079		dev_info(intf->si_dev,
3080			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3081			 bmc->id.manufacturer_id,
3082			 bmc->id.product_id,
3083			 bmc->id.device_id);
3084	}
3085
3086	/*
3087	 * create symlink from system interface device to bmc device
3088	 * and back.
3089	 */
3090	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3091	if (rv) {
3092		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3093		goto out_put_bmc;
3094	}
3095
3096	if (intf_num == -1)
3097		intf_num = intf->intf_num;
3098	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3099	if (!intf->my_dev_name) {
3100		rv = -ENOMEM;
3101		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3102			rv);
3103		goto out_unlink1;
3104	}
3105
3106	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3107			       intf->my_dev_name);
3108	if (rv) {
3109		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3110			rv);
3111		goto out_free_my_dev_name;
3112	}
3113
3114	intf->bmc_registered = true;
3115
3116out:
3117	mutex_unlock(&ipmidriver_mutex);
3118	mutex_lock(&intf->bmc_reg_mutex);
3119	intf->in_bmc_register = false;
3120	return rv;
3121
3122
3123out_free_my_dev_name:
3124	kfree(intf->my_dev_name);
3125	intf->my_dev_name = NULL;
3126
3127out_unlink1:
3128	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3129
3130out_put_bmc:
3131	mutex_lock(&bmc->dyn_mutex);
3132	list_del(&intf->bmc_link);
3133	mutex_unlock(&bmc->dyn_mutex);
3134	intf->bmc = &intf->tmp_bmc;
3135	kref_put(&bmc->usecount, cleanup_bmc_device);
3136	goto out;
3137
3138out_list_del:
3139	mutex_lock(&bmc->dyn_mutex);
3140	list_del(&intf->bmc_link);
3141	mutex_unlock(&bmc->dyn_mutex);
3142	intf->bmc = &intf->tmp_bmc;
3143	put_device(&bmc->pdev.dev);
3144	goto out;
3145}
3146
3147static int
3148send_guid_cmd(struct ipmi_smi *intf, int chan)
3149{
3150	struct kernel_ipmi_msg            msg;
3151	struct ipmi_system_interface_addr si;
3152
3153	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3154	si.channel = IPMI_BMC_CHANNEL;
3155	si.lun = 0;
3156
3157	msg.netfn = IPMI_NETFN_APP_REQUEST;
3158	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3159	msg.data = NULL;
3160	msg.data_len = 0;
3161	return i_ipmi_request(NULL,
3162			      intf,
3163			      (struct ipmi_addr *) &si,
3164			      0,
3165			      &msg,
3166			      intf,
3167			      NULL,
3168			      NULL,
3169			      0,
3170			      intf->addrinfo[0].address,
3171			      intf->addrinfo[0].lun,
3172			      -1, 0);
3173}
3174
3175static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3176{
3177	struct bmc_device *bmc = intf->bmc;
3178
3179	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3180	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3181	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3182		/* Not for me */
3183		return;
3184
3185	if (msg->msg.data[0] != 0) {
3186		/* Error from getting the GUID, the BMC doesn't have one. */
3187		bmc->dyn_guid_set = 0;
3188		goto out;
3189	}
3190
3191	if (msg->msg.data_len < UUID_SIZE + 1) {
3192		bmc->dyn_guid_set = 0;
3193		dev_warn(intf->si_dev,
3194			 "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
3195			 msg->msg.data_len, UUID_SIZE + 1);
3196		goto out;
3197	}
3198
3199	import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3200	/*
3201	 * Make sure the guid data is available before setting
3202	 * dyn_guid_set.
3203	 */
3204	smp_wmb();
3205	bmc->dyn_guid_set = 1;
3206 out:
3207	wake_up(&intf->waitq);
3208}
3209
3210static void __get_guid(struct ipmi_smi *intf)
3211{
3212	int rv;
3213	struct bmc_device *bmc = intf->bmc;
3214
3215	bmc->dyn_guid_set = 2;
3216	intf->null_user_handler = guid_handler;
3217	rv = send_guid_cmd(intf, 0);
3218	if (rv)
3219		/* Send failed, no GUID available. */
3220		bmc->dyn_guid_set = 0;
3221	else
3222		wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3223
3224	/* dyn_guid_set makes the guid data available. */
3225	smp_rmb();
3226
3227	intf->null_user_handler = NULL;
3228}
3229
3230static int
3231send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3232{
3233	struct kernel_ipmi_msg            msg;
3234	unsigned char                     data[1];
3235	struct ipmi_system_interface_addr si;
3236
3237	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3238	si.channel = IPMI_BMC_CHANNEL;
3239	si.lun = 0;
3240
3241	msg.netfn = IPMI_NETFN_APP_REQUEST;
3242	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3243	msg.data = data;
3244	msg.data_len = 1;
3245	data[0] = chan;
3246	return i_ipmi_request(NULL,
3247			      intf,
3248			      (struct ipmi_addr *) &si,
3249			      0,
3250			      &msg,
3251			      intf,
3252			      NULL,
3253			      NULL,
3254			      0,
3255			      intf->addrinfo[0].address,
3256			      intf->addrinfo[0].lun,
3257			      -1, 0);
3258}
3259
3260static void
3261channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3262{
3263	int rv = 0;
3264	int ch;
3265	unsigned int set = intf->curr_working_cset;
3266	struct ipmi_channel *chans;
3267
3268	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3269	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3270	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3271		/* It's the one we want */
3272		if (msg->msg.data[0] != 0) {
3273			/* Got an error from the channel, just go on. */
3274			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3275				/*
3276				 * If the MC does not support this
3277				 * command, that is legal.  We just
3278				 * assume it has one IPMB at channel
3279				 * zero.
3280				 */
3281				intf->wchannels[set].c[0].medium
3282					= IPMI_CHANNEL_MEDIUM_IPMB;
3283				intf->wchannels[set].c[0].protocol
3284					= IPMI_CHANNEL_PROTOCOL_IPMB;
3285
3286				intf->channel_list = intf->wchannels + set;
3287				intf->channels_ready = true;
3288				wake_up(&intf->waitq);
3289				goto out;
3290			}
3291			goto next_channel;
3292		}
3293		if (msg->msg.data_len < 4) {
3294			/* Message not big enough, just go on. */
3295			goto next_channel;
3296		}
3297		ch = intf->curr_channel;
3298		chans = intf->wchannels[set].c;
3299		chans[ch].medium = msg->msg.data[2] & 0x7f;
3300		chans[ch].protocol = msg->msg.data[3] & 0x1f;
3301
3302 next_channel:
3303		intf->curr_channel++;
3304		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3305			intf->channel_list = intf->wchannels + set;
3306			intf->channels_ready = true;
3307			wake_up(&intf->waitq);
3308		} else {
3309			intf->channel_list = intf->wchannels + set;
3310			intf->channels_ready = true;
3311			rv = send_channel_info_cmd(intf, intf->curr_channel);
3312		}
3313
3314		if (rv) {
3315			/* Got an error somehow, just give up. */
3316			dev_warn(intf->si_dev,
3317				 "Error sending channel information for channel %d: %d\n",
3318				 intf->curr_channel, rv);
3319
3320			intf->channel_list = intf->wchannels + set;
3321			intf->channels_ready = true;
3322			wake_up(&intf->waitq);
3323		}
3324	}
3325 out:
3326	return;
3327}
3328
3329/*
3330 * Must be holding intf->bmc_reg_mutex to call this.
3331 */
3332static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3333{
3334	int rv;
3335
3336	if (ipmi_version_major(id) > 1
3337			|| (ipmi_version_major(id) == 1
3338			    && ipmi_version_minor(id) >= 5)) {
3339		unsigned int set;
3340
3341		/*
3342		 * Start scanning the channels to see what is
3343		 * available.
3344		 */
3345		set = !intf->curr_working_cset;
3346		intf->curr_working_cset = set;
3347		memset(&intf->wchannels[set], 0,
3348		       sizeof(struct ipmi_channel_set));
3349
3350		intf->null_user_handler = channel_handler;
3351		intf->curr_channel = 0;
3352		rv = send_channel_info_cmd(intf, 0);
3353		if (rv) {
3354			dev_warn(intf->si_dev,
3355				 "Error sending channel information for channel 0, %d\n",
3356				 rv);
3357			intf->null_user_handler = NULL;
3358			return -EIO;
3359		}
3360
3361		/* Wait for the channel info to be read. */
3362		wait_event(intf->waitq, intf->channels_ready);
3363		intf->null_user_handler = NULL;
3364	} else {
3365		unsigned int set = intf->curr_working_cset;
3366
3367		/* Assume a single IPMB channel at zero. */
3368		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3369		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3370		intf->channel_list = intf->wchannels + set;
3371		intf->channels_ready = true;
3372	}
3373
3374	return 0;
3375}
3376
3377static void ipmi_poll(struct ipmi_smi *intf)
3378{
3379	if (intf->handlers->poll)
3380		intf->handlers->poll(intf->send_info);
3381	/* In case something came in */
3382	handle_new_recv_msgs(intf);
3383}
3384
3385void ipmi_poll_interface(struct ipmi_user *user)
3386{
3387	ipmi_poll(user->intf);
3388}
3389EXPORT_SYMBOL(ipmi_poll_interface);
3390
3391static void redo_bmc_reg(struct work_struct *work)
3392{
3393	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3394					     bmc_reg_work);
3395
3396	if (!intf->in_shutdown)
3397		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3398
3399	kref_put(&intf->refcount, intf_free);
3400}
3401
3402int ipmi_add_smi(struct module         *owner,
3403		 const struct ipmi_smi_handlers *handlers,
3404		 void		       *send_info,
3405		 struct device         *si_dev,
3406		 unsigned char         slave_addr)
3407{
3408	int              i, j;
3409	int              rv;
3410	struct ipmi_smi *intf, *tintf;
3411	struct list_head *link;
3412	struct ipmi_device_id id;
3413
3414	/*
3415	 * Make sure the driver is actually initialized, this handles
3416	 * problems with initialization order.
3417	 */
3418	rv = ipmi_init_msghandler();
3419	if (rv)
3420		return rv;
3421
3422	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3423	if (!intf)
3424		return -ENOMEM;
3425
3426	rv = init_srcu_struct(&intf->users_srcu);
3427	if (rv) {
3428		kfree(intf);
3429		return rv;
3430	}
3431
3432	intf->owner = owner;
3433	intf->bmc = &intf->tmp_bmc;
3434	INIT_LIST_HEAD(&intf->bmc->intfs);
3435	mutex_init(&intf->bmc->dyn_mutex);
3436	INIT_LIST_HEAD(&intf->bmc_link);
3437	mutex_init(&intf->bmc_reg_mutex);
3438	intf->intf_num = -1; /* Mark it invalid for now. */
3439	kref_init(&intf->refcount);
3440	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3441	intf->si_dev = si_dev;
3442	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3443		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3444		intf->addrinfo[j].lun = 2;
3445	}
3446	if (slave_addr != 0)
3447		intf->addrinfo[0].address = slave_addr;
3448	INIT_LIST_HEAD(&intf->users);
3449	intf->handlers = handlers;
3450	intf->send_info = send_info;
3451	spin_lock_init(&intf->seq_lock);
3452	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3453		intf->seq_table[j].inuse = 0;
3454		intf->seq_table[j].seqid = 0;
3455	}
3456	intf->curr_seq = 0;
3457	spin_lock_init(&intf->waiting_rcv_msgs_lock);
3458	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3459	tasklet_setup(&intf->recv_tasklet,
3460		     smi_recv_tasklet);
3461	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3462	spin_lock_init(&intf->xmit_msgs_lock);
3463	INIT_LIST_HEAD(&intf->xmit_msgs);
3464	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3465	spin_lock_init(&intf->events_lock);
3466	spin_lock_init(&intf->watch_lock);
3467	atomic_set(&intf->event_waiters, 0);
3468	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3469	INIT_LIST_HEAD(&intf->waiting_events);
3470	intf->waiting_events_count = 0;
3471	mutex_init(&intf->cmd_rcvrs_mutex);
3472	spin_lock_init(&intf->maintenance_mode_lock);
3473	INIT_LIST_HEAD(&intf->cmd_rcvrs);
3474	init_waitqueue_head(&intf->waitq);
3475	for (i = 0; i < IPMI_NUM_STATS; i++)
3476		atomic_set(&intf->stats[i], 0);
3477
3478	mutex_lock(&ipmi_interfaces_mutex);
3479	/* Look for a hole in the numbers. */
3480	i = 0;
3481	link = &ipmi_interfaces;
3482	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
3483				ipmi_interfaces_mutex_held()) {
3484		if (tintf->intf_num != i) {
3485			link = &tintf->link;
3486			break;
3487		}
3488		i++;
3489	}
3490	/* Add the new interface in numeric order. */
3491	if (i == 0)
3492		list_add_rcu(&intf->link, &ipmi_interfaces);
3493	else
3494		list_add_tail_rcu(&intf->link, link);
3495
3496	rv = handlers->start_processing(send_info, intf);
3497	if (rv)
3498		goto out_err;
3499
3500	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3501	if (rv) {
3502		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3503		goto out_err_started;
3504	}
3505
3506	mutex_lock(&intf->bmc_reg_mutex);
3507	rv = __scan_channels(intf, &id);
3508	mutex_unlock(&intf->bmc_reg_mutex);
3509	if (rv)
3510		goto out_err_bmc_reg;
3511
3512	/*
3513	 * Keep memory order straight for RCU readers.  Make
3514	 * sure everything else is committed to memory before
3515	 * setting intf_num to mark the interface valid.
3516	 */
3517	smp_wmb();
3518	intf->intf_num = i;
3519	mutex_unlock(&ipmi_interfaces_mutex);
3520
3521	/* After this point the interface is legal to use. */
3522	call_smi_watchers(i, intf->si_dev);
3523
3524	return 0;
3525
3526 out_err_bmc_reg:
3527	ipmi_bmc_unregister(intf);
3528 out_err_started:
3529	if (intf->handlers->shutdown)
3530		intf->handlers->shutdown(intf->send_info);
3531 out_err:
3532	list_del_rcu(&intf->link);
3533	mutex_unlock(&ipmi_interfaces_mutex);
3534	synchronize_srcu(&ipmi_interfaces_srcu);
3535	cleanup_srcu_struct(&intf->users_srcu);
3536	kref_put(&intf->refcount, intf_free);
3537
3538	return rv;
3539}
3540EXPORT_SYMBOL(ipmi_add_smi);
3541
3542static void deliver_smi_err_response(struct ipmi_smi *intf,
3543				     struct ipmi_smi_msg *msg,
3544				     unsigned char err)
3545{
3546	int rv;
3547	msg->rsp[0] = msg->data[0] | 4;
3548	msg->rsp[1] = msg->data[1];
3549	msg->rsp[2] = err;
3550	msg->rsp_size = 3;
3551
3552	/* This will never requeue, but it may ask us to free the message. */
3553	rv = handle_one_recv_msg(intf, msg);
3554	if (rv == 0)
3555		ipmi_free_smi_msg(msg);
3556}
3557
3558static void cleanup_smi_msgs(struct ipmi_smi *intf)
3559{
3560	int              i;
3561	struct seq_table *ent;
3562	struct ipmi_smi_msg *msg;
3563	struct list_head *entry;
3564	struct list_head tmplist;
3565
3566	/* Clear out our transmit queues and hold the messages. */
3567	INIT_LIST_HEAD(&tmplist);
3568	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3569	list_splice_tail(&intf->xmit_msgs, &tmplist);
3570
3571	/* Current message first, to preserve order */
3572	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3573		/* Wait for the message to clear out. */
3574		schedule_timeout(1);
3575	}
3576
3577	/* No need for locks, the interface is down. */
3578
3579	/*
3580	 * Return errors for all pending messages in queue and in the
3581	 * tables waiting for remote responses.
3582	 */
3583	while (!list_empty(&tmplist)) {
3584		entry = tmplist.next;
3585		list_del(entry);
3586		msg = list_entry(entry, struct ipmi_smi_msg, link);
3587		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3588	}
3589
3590	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3591		ent = &intf->seq_table[i];
3592		if (!ent->inuse)
3593			continue;
3594		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3595	}
3596}
3597
3598void ipmi_unregister_smi(struct ipmi_smi *intf)
3599{
3600	struct ipmi_smi_watcher *w;
3601	int intf_num = intf->intf_num, index;
3602
3603	mutex_lock(&ipmi_interfaces_mutex);
3604	intf->intf_num = -1;
3605	intf->in_shutdown = true;
3606	list_del_rcu(&intf->link);
3607	mutex_unlock(&ipmi_interfaces_mutex);
3608	synchronize_srcu(&ipmi_interfaces_srcu);
3609
3610	/* At this point no users can be added to the interface. */
3611
3612	/*
3613	 * Call all the watcher interfaces to tell them that
3614	 * an interface is going away.
3615	 */
3616	mutex_lock(&smi_watchers_mutex);
3617	list_for_each_entry(w, &smi_watchers, link)
3618		w->smi_gone(intf_num);
3619	mutex_unlock(&smi_watchers_mutex);
3620
3621	index = srcu_read_lock(&intf->users_srcu);
3622	while (!list_empty(&intf->users)) {
3623		struct ipmi_user *user =
3624			container_of(list_next_rcu(&intf->users),
3625				     struct ipmi_user, link);
3626
3627		_ipmi_destroy_user(user);
3628	}
3629	srcu_read_unlock(&intf->users_srcu, index);
3630
3631	if (intf->handlers->shutdown)
3632		intf->handlers->shutdown(intf->send_info);
3633
3634	cleanup_smi_msgs(intf);
3635
3636	ipmi_bmc_unregister(intf);
3637
3638	cleanup_srcu_struct(&intf->users_srcu);
3639	kref_put(&intf->refcount, intf_free);
3640}
3641EXPORT_SYMBOL(ipmi_unregister_smi);
3642
3643static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3644				   struct ipmi_smi_msg *msg)
3645{
3646	struct ipmi_ipmb_addr ipmb_addr;
3647	struct ipmi_recv_msg  *recv_msg;
3648
3649	/*
3650	 * This is 11, not 10, because the response must contain a
3651	 * completion code.
3652	 */
3653	if (msg->rsp_size < 11) {
3654		/* Message not big enough, just ignore it. */
3655		ipmi_inc_stat(intf, invalid_ipmb_responses);
3656		return 0;
3657	}
3658
3659	if (msg->rsp[2] != 0) {
3660		/* An error getting the response, just ignore it. */
3661		return 0;
3662	}
3663
3664	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3665	ipmb_addr.slave_addr = msg->rsp[6];
3666	ipmb_addr.channel = msg->rsp[3] & 0x0f;
3667	ipmb_addr.lun = msg->rsp[7] & 3;
3668
3669	/*
3670	 * It's a response from a remote entity.  Look up the sequence
3671	 * number and handle the response.
3672	 */
3673	if (intf_find_seq(intf,
3674			  msg->rsp[7] >> 2,
3675			  msg->rsp[3] & 0x0f,
3676			  msg->rsp[8],
3677			  (msg->rsp[4] >> 2) & (~1),
3678			  (struct ipmi_addr *) &ipmb_addr,
3679			  &recv_msg)) {
3680		/*
3681		 * We were unable to find the sequence number,
3682		 * so just nuke the message.
3683		 */
3684		ipmi_inc_stat(intf, unhandled_ipmb_responses);
3685		return 0;
3686	}
3687
3688	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3689	/*
3690	 * The other fields matched, so no need to set them, except
3691	 * for netfn, which needs to be the response that was
3692	 * returned, not the request value.
3693	 */
3694	recv_msg->msg.netfn = msg->rsp[4] >> 2;
3695	recv_msg->msg.data = recv_msg->msg_data;
3696	recv_msg->msg.data_len = msg->rsp_size - 10;
3697	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3698	if (deliver_response(intf, recv_msg))
3699		ipmi_inc_stat(intf, unhandled_ipmb_responses);
3700	else
3701		ipmi_inc_stat(intf, handled_ipmb_responses);
3702
3703	return 0;
3704}
3705
3706static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3707				   struct ipmi_smi_msg *msg)
3708{
3709	struct cmd_rcvr          *rcvr;
3710	int                      rv = 0;
3711	unsigned char            netfn;
3712	unsigned char            cmd;
3713	unsigned char            chan;
3714	struct ipmi_user         *user = NULL;
3715	struct ipmi_ipmb_addr    *ipmb_addr;
3716	struct ipmi_recv_msg     *recv_msg;
3717
3718	if (msg->rsp_size < 10) {
3719		/* Message not big enough, just ignore it. */
3720		ipmi_inc_stat(intf, invalid_commands);
3721		return 0;
3722	}
3723
3724	if (msg->rsp[2] != 0) {
3725		/* An error getting the response, just ignore it. */
3726		return 0;
3727	}
3728
3729	netfn = msg->rsp[4] >> 2;
3730	cmd = msg->rsp[8];
3731	chan = msg->rsp[3] & 0xf;
3732
3733	rcu_read_lock();
3734	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3735	if (rcvr) {
3736		user = rcvr->user;
3737		kref_get(&user->refcount);
3738	} else
3739		user = NULL;
3740	rcu_read_unlock();
3741
3742	if (user == NULL) {
3743		/* We didn't find a user, deliver an error response. */
3744		ipmi_inc_stat(intf, unhandled_commands);
3745
3746		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3747		msg->data[1] = IPMI_SEND_MSG_CMD;
3748		msg->data[2] = msg->rsp[3];
3749		msg->data[3] = msg->rsp[6];
3750		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3751		msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3752		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3753		/* rqseq/lun */
3754		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3755		msg->data[8] = msg->rsp[8]; /* cmd */
3756		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3757		msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3758		msg->data_size = 11;
3759
3760		pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
3761
3762		rcu_read_lock();
3763		if (!intf->in_shutdown) {
3764			smi_send(intf, intf->handlers, msg, 0);
3765			/*
3766			 * We used the message, so return the value
3767			 * that causes it to not be freed or
3768			 * queued.
3769			 */
3770			rv = -1;
3771		}
3772		rcu_read_unlock();
3773	} else {
3774		recv_msg = ipmi_alloc_recv_msg();
3775		if (!recv_msg) {
3776			/*
3777			 * We couldn't allocate memory for the
3778			 * message, so requeue it for handling
3779			 * later.
3780			 */
3781			rv = 1;
3782			kref_put(&user->refcount, free_user);
3783		} else {
3784			/* Extract the source address from the data. */
3785			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3786			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3787			ipmb_addr->slave_addr = msg->rsp[6];
3788			ipmb_addr->lun = msg->rsp[7] & 3;
3789			ipmb_addr->channel = msg->rsp[3] & 0xf;
3790
3791			/*
3792			 * Extract the rest of the message information
3793			 * from the IPMB header.
3794			 */
3795			recv_msg->user = user;
3796			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3797			recv_msg->msgid = msg->rsp[7] >> 2;
3798			recv_msg->msg.netfn = msg->rsp[4] >> 2;
3799			recv_msg->msg.cmd = msg->rsp[8];
3800			recv_msg->msg.data = recv_msg->msg_data;
3801
3802			/*
3803			 * We chop off 10, not 9 bytes because the checksum
3804			 * at the end also needs to be removed.
3805			 */
3806			recv_msg->msg.data_len = msg->rsp_size - 10;
3807			memcpy(recv_msg->msg_data, &msg->rsp[9],
3808			       msg->rsp_size - 10);
3809			if (deliver_response(intf, recv_msg))
3810				ipmi_inc_stat(intf, unhandled_commands);
3811			else
3812				ipmi_inc_stat(intf, handled_commands);
3813		}
3814	}
3815
3816	return rv;
3817}
3818
3819static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3820				  struct ipmi_smi_msg *msg)
3821{
3822	struct ipmi_lan_addr  lan_addr;
3823	struct ipmi_recv_msg  *recv_msg;
3824
3825
3826	/*
3827	 * This is 13, not 12, because the response must contain a
3828	 * completion code.
3829	 */
3830	if (msg->rsp_size < 13) {
3831		/* Message not big enough, just ignore it. */
3832		ipmi_inc_stat(intf, invalid_lan_responses);
3833		return 0;
3834	}
3835
3836	if (msg->rsp[2] != 0) {
3837		/* An error getting the response, just ignore it. */
3838		return 0;
3839	}
3840
3841	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3842	lan_addr.session_handle = msg->rsp[4];
3843	lan_addr.remote_SWID = msg->rsp[8];
3844	lan_addr.local_SWID = msg->rsp[5];
3845	lan_addr.channel = msg->rsp[3] & 0x0f;
3846	lan_addr.privilege = msg->rsp[3] >> 4;
3847	lan_addr.lun = msg->rsp[9] & 3;
3848
3849	/*
3850	 * It's a response from a remote entity.  Look up the sequence
3851	 * number and handle the response.
3852	 */
3853	if (intf_find_seq(intf,
3854			  msg->rsp[9] >> 2,
3855			  msg->rsp[3] & 0x0f,
3856			  msg->rsp[10],
3857			  (msg->rsp[6] >> 2) & (~1),
3858			  (struct ipmi_addr *) &lan_addr,
3859			  &recv_msg)) {
3860		/*
3861		 * We were unable to find the sequence number,
3862		 * so just nuke the message.
3863		 */
3864		ipmi_inc_stat(intf, unhandled_lan_responses);
3865		return 0;
3866	}
3867
3868	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3869	/*
3870	 * The other fields matched, so no need to set them, except
3871	 * for netfn, which needs to be the response that was
3872	 * returned, not the request value.
3873	 */
3874	recv_msg->msg.netfn = msg->rsp[6] >> 2;
3875	recv_msg->msg.data = recv_msg->msg_data;
3876	recv_msg->msg.data_len = msg->rsp_size - 12;
3877	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3878	if (deliver_response(intf, recv_msg))
3879		ipmi_inc_stat(intf, unhandled_lan_responses);
3880	else
3881		ipmi_inc_stat(intf, handled_lan_responses);
3882
3883	return 0;
3884}
3885
3886static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3887				  struct ipmi_smi_msg *msg)
3888{
3889	struct cmd_rcvr          *rcvr;
3890	int                      rv = 0;
3891	unsigned char            netfn;
3892	unsigned char            cmd;
3893	unsigned char            chan;
3894	struct ipmi_user         *user = NULL;
3895	struct ipmi_lan_addr     *lan_addr;
3896	struct ipmi_recv_msg     *recv_msg;
3897
3898	if (msg->rsp_size < 12) {
3899		/* Message not big enough, just ignore it. */
3900		ipmi_inc_stat(intf, invalid_commands);
3901		return 0;
3902	}
3903
3904	if (msg->rsp[2] != 0) {
3905		/* An error getting the response, just ignore it. */
3906		return 0;
3907	}
3908
3909	netfn = msg->rsp[6] >> 2;
3910	cmd = msg->rsp[10];
3911	chan = msg->rsp[3] & 0xf;
3912
3913	rcu_read_lock();
3914	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3915	if (rcvr) {
3916		user = rcvr->user;
3917		kref_get(&user->refcount);
3918	} else
3919		user = NULL;
3920	rcu_read_unlock();
3921
3922	if (user == NULL) {
3923		/* We didn't find a user, just give up. */
3924		ipmi_inc_stat(intf, unhandled_commands);
3925
3926		/*
3927		 * Don't do anything with these messages, just allow
3928		 * them to be freed.
3929		 */
3930		rv = 0;
3931	} else {
3932		recv_msg = ipmi_alloc_recv_msg();
3933		if (!recv_msg) {
3934			/*
3935			 * We couldn't allocate memory for the
3936			 * message, so requeue it for handling later.
3937			 */
3938			rv = 1;
3939			kref_put(&user->refcount, free_user);
3940		} else {
3941			/* Extract the source address from the data. */
3942			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3943			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3944			lan_addr->session_handle = msg->rsp[4];
3945			lan_addr->remote_SWID = msg->rsp[8];
3946			lan_addr->local_SWID = msg->rsp[5];
3947			lan_addr->lun = msg->rsp[9] & 3;
3948			lan_addr->channel = msg->rsp[3] & 0xf;
3949			lan_addr->privilege = msg->rsp[3] >> 4;
3950
3951			/*
3952			 * Extract the rest of the message information
3953			 * from the IPMB header.
3954			 */
3955			recv_msg->user = user;
3956			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3957			recv_msg->msgid = msg->rsp[9] >> 2;
3958			recv_msg->msg.netfn = msg->rsp[6] >> 2;
3959			recv_msg->msg.cmd = msg->rsp[10];
3960			recv_msg->msg.data = recv_msg->msg_data;
3961
3962			/*
3963			 * We chop off 12, not 11 bytes because the checksum
3964			 * at the end also needs to be removed.
3965			 */
3966			recv_msg->msg.data_len = msg->rsp_size - 12;
3967			memcpy(recv_msg->msg_data, &msg->rsp[11],
3968			       msg->rsp_size - 12);
3969			if (deliver_response(intf, recv_msg))
3970				ipmi_inc_stat(intf, unhandled_commands);
3971			else
3972				ipmi_inc_stat(intf, handled_commands);
3973		}
3974	}
3975
3976	return rv;
3977}
3978
3979/*
3980 * This routine will handle "Get Message" command responses with
3981 * channels that use an OEM Medium. The message format belongs to
3982 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3983 * Chapter 22, sections 22.6 and 22.24 for more details.
3984 */
3985static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3986				  struct ipmi_smi_msg *msg)
3987{
3988	struct cmd_rcvr       *rcvr;
3989	int                   rv = 0;
3990	unsigned char         netfn;
3991	unsigned char         cmd;
3992	unsigned char         chan;
3993	struct ipmi_user *user = NULL;
3994	struct ipmi_system_interface_addr *smi_addr;
3995	struct ipmi_recv_msg  *recv_msg;
3996
3997	/*
3998	 * We expect the OEM SW to perform error checking
3999	 * so we just do some basic sanity checks
4000	 */
4001	if (msg->rsp_size < 4) {
4002		/* Message not big enough, just ignore it. */
4003		ipmi_inc_stat(intf, invalid_commands);
4004		return 0;
4005	}
4006
4007	if (msg->rsp[2] != 0) {
4008		/* An error getting the response, just ignore it. */
4009		return 0;
4010	}
4011
4012	/*
4013	 * This is an OEM Message so the OEM needs to know how
4014	 * handle the message. We do no interpretation.
4015	 */
4016	netfn = msg->rsp[0] >> 2;
4017	cmd = msg->rsp[1];
4018	chan = msg->rsp[3] & 0xf;
4019
4020	rcu_read_lock();
4021	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4022	if (rcvr) {
4023		user = rcvr->user;
4024		kref_get(&user->refcount);
4025	} else
4026		user = NULL;
4027	rcu_read_unlock();
4028
4029	if (user == NULL) {
4030		/* We didn't find a user, just give up. */
4031		ipmi_inc_stat(intf, unhandled_commands);
4032
4033		/*
4034		 * Don't do anything with these messages, just allow
4035		 * them to be freed.
4036		 */
4037
4038		rv = 0;
4039	} else {
4040		recv_msg = ipmi_alloc_recv_msg();
4041		if (!recv_msg) {
4042			/*
4043			 * We couldn't allocate memory for the
4044			 * message, so requeue it for handling
4045			 * later.
4046			 */
4047			rv = 1;
4048			kref_put(&user->refcount, free_user);
4049		} else {
4050			/*
4051			 * OEM Messages are expected to be delivered via
4052			 * the system interface to SMS software.  We might
4053			 * need to visit this again depending on OEM
4054			 * requirements
4055			 */
4056			smi_addr = ((struct ipmi_system_interface_addr *)
4057				    &recv_msg->addr);
4058			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4059			smi_addr->channel = IPMI_BMC_CHANNEL;
4060			smi_addr->lun = msg->rsp[0] & 3;
4061
4062			recv_msg->user = user;
4063			recv_msg->user_msg_data = NULL;
4064			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4065			recv_msg->msg.netfn = msg->rsp[0] >> 2;
4066			recv_msg->msg.cmd = msg->rsp[1];
4067			recv_msg->msg.data = recv_msg->msg_data;
4068
4069			/*
4070			 * The message starts at byte 4 which follows the
4071			 * the Channel Byte in the "GET MESSAGE" command
4072			 */
4073			recv_msg->msg.data_len = msg->rsp_size - 4;
4074			memcpy(recv_msg->msg_data, &msg->rsp[4],
4075			       msg->rsp_size - 4);
4076			if (deliver_response(intf, recv_msg))
4077				ipmi_inc_stat(intf, unhandled_commands);
4078			else
4079				ipmi_inc_stat(intf, handled_commands);
4080		}
4081	}
4082
4083	return rv;
4084}
4085
4086static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4087				     struct ipmi_smi_msg  *msg)
4088{
4089	struct ipmi_system_interface_addr *smi_addr;
4090
4091	recv_msg->msgid = 0;
4092	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4093	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4094	smi_addr->channel = IPMI_BMC_CHANNEL;
4095	smi_addr->lun = msg->rsp[0] & 3;
4096	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4097	recv_msg->msg.netfn = msg->rsp[0] >> 2;
4098	recv_msg->msg.cmd = msg->rsp[1];
4099	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4100	recv_msg->msg.data = recv_msg->msg_data;
4101	recv_msg->msg.data_len = msg->rsp_size - 3;
4102}
4103
4104static int handle_read_event_rsp(struct ipmi_smi *intf,
4105				 struct ipmi_smi_msg *msg)
4106{
4107	struct ipmi_recv_msg *recv_msg, *recv_msg2;
4108	struct list_head     msgs;
4109	struct ipmi_user     *user;
4110	int rv = 0, deliver_count = 0, index;
4111	unsigned long        flags;
4112
4113	if (msg->rsp_size < 19) {
4114		/* Message is too small to be an IPMB event. */
4115		ipmi_inc_stat(intf, invalid_events);
4116		return 0;
4117	}
4118
4119	if (msg->rsp[2] != 0) {
4120		/* An error getting the event, just ignore it. */
4121		return 0;
4122	}
4123
4124	INIT_LIST_HEAD(&msgs);
4125
4126	spin_lock_irqsave(&intf->events_lock, flags);
4127
4128	ipmi_inc_stat(intf, events);
4129
4130	/*
4131	 * Allocate and fill in one message for every user that is
4132	 * getting events.
4133	 */
4134	index = srcu_read_lock(&intf->users_srcu);
4135	list_for_each_entry_rcu(user, &intf->users, link) {
4136		if (!user->gets_events)
4137			continue;
4138
4139		recv_msg = ipmi_alloc_recv_msg();
4140		if (!recv_msg) {
4141			rcu_read_unlock();
4142			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4143						 link) {
4144				list_del(&recv_msg->link);
4145				ipmi_free_recv_msg(recv_msg);
4146			}
4147			/*
4148			 * We couldn't allocate memory for the
4149			 * message, so requeue it for handling
4150			 * later.
4151			 */
4152			rv = 1;
4153			goto out;
4154		}
4155
4156		deliver_count++;
4157
4158		copy_event_into_recv_msg(recv_msg, msg);
4159		recv_msg->user = user;
4160		kref_get(&user->refcount);
4161		list_add_tail(&recv_msg->link, &msgs);
4162	}
4163	srcu_read_unlock(&intf->users_srcu, index);
4164
4165	if (deliver_count) {
4166		/* Now deliver all the messages. */
4167		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4168			list_del(&recv_msg->link);
4169			deliver_local_response(intf, recv_msg);
4170		}
4171	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4172		/*
4173		 * No one to receive the message, put it in queue if there's
4174		 * not already too many things in the queue.
4175		 */
4176		recv_msg = ipmi_alloc_recv_msg();
4177		if (!recv_msg) {
4178			/*
4179			 * We couldn't allocate memory for the
4180			 * message, so requeue it for handling
4181			 * later.
4182			 */
4183			rv = 1;
4184			goto out;
4185		}
4186
4187		copy_event_into_recv_msg(recv_msg, msg);
4188		list_add_tail(&recv_msg->link, &intf->waiting_events);
4189		intf->waiting_events_count++;
4190	} else if (!intf->event_msg_printed) {
4191		/*
4192		 * There's too many things in the queue, discard this
4193		 * message.
4194		 */
4195		dev_warn(intf->si_dev,
4196			 "Event queue full, discarding incoming events\n");
4197		intf->event_msg_printed = 1;
4198	}
4199
4200 out:
4201	spin_unlock_irqrestore(&intf->events_lock, flags);
4202
4203	return rv;
4204}
4205
4206static int handle_bmc_rsp(struct ipmi_smi *intf,
4207			  struct ipmi_smi_msg *msg)
4208{
4209	struct ipmi_recv_msg *recv_msg;
4210	struct ipmi_system_interface_addr *smi_addr;
4211
4212	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4213	if (recv_msg == NULL) {
4214		dev_warn(intf->si_dev,
4215			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4216		return 0;
4217	}
4218
4219	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4220	recv_msg->msgid = msg->msgid;
4221	smi_addr = ((struct ipmi_system_interface_addr *)
4222		    &recv_msg->addr);
4223	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4224	smi_addr->channel = IPMI_BMC_CHANNEL;
4225	smi_addr->lun = msg->rsp[0] & 3;
4226	recv_msg->msg.netfn = msg->rsp[0] >> 2;
4227	recv_msg->msg.cmd = msg->rsp[1];
4228	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4229	recv_msg->msg.data = recv_msg->msg_data;
4230	recv_msg->msg.data_len = msg->rsp_size - 2;
4231	deliver_local_response(intf, recv_msg);
4232
4233	return 0;
4234}
4235
4236/*
4237 * Handle a received message.  Return 1 if the message should be requeued,
4238 * 0 if the message should be freed, or -1 if the message should not
4239 * be freed or requeued.
4240 */
4241static int handle_one_recv_msg(struct ipmi_smi *intf,
4242			       struct ipmi_smi_msg *msg)
4243{
4244	int requeue;
4245	int chan;
4246
4247	pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
4248
4249	if ((msg->data_size >= 2)
4250	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4251	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
4252	    && (msg->user_data == NULL)) {
4253
4254		if (intf->in_shutdown)
4255			goto free_msg;
4256
4257		/*
4258		 * This is the local response to a command send, start
4259		 * the timer for these.  The user_data will not be
4260		 * NULL if this is a response send, and we will let
4261		 * response sends just go through.
4262		 */
4263
4264		/*
4265		 * Check for errors, if we get certain errors (ones
4266		 * that mean basically we can try again later), we
4267		 * ignore them and start the timer.  Otherwise we
4268		 * report the error immediately.
4269		 */
4270		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4271		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4272		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4273		    && (msg->rsp[2] != IPMI_BUS_ERR)
4274		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4275			int ch = msg->rsp[3] & 0xf;
4276			struct ipmi_channel *chans;
4277
4278			/* Got an error sending the message, handle it. */
4279
4280			chans = READ_ONCE(intf->channel_list)->c;
4281			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4282			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4283				ipmi_inc_stat(intf, sent_lan_command_errs);
4284			else
4285				ipmi_inc_stat(intf, sent_ipmb_command_errs);
4286			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4287		} else
4288			/* The message was sent, start the timer. */
4289			intf_start_seq_timer(intf, msg->msgid);
4290free_msg:
4291		requeue = 0;
4292		goto out;
4293
4294	} else if (msg->rsp_size < 2) {
4295		/* Message is too small to be correct. */
4296		dev_warn(intf->si_dev,
4297			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4298			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4299
4300		/* Generate an error response for the message. */
4301		msg->rsp[0] = msg->data[0] | (1 << 2);
4302		msg->rsp[1] = msg->data[1];
4303		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4304		msg->rsp_size = 3;
4305	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4306		   || (msg->rsp[1] != msg->data[1])) {
4307		/*
4308		 * The NetFN and Command in the response is not even
4309		 * marginally correct.
4310		 */
4311		dev_warn(intf->si_dev,
4312			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4313			 (msg->data[0] >> 2) | 1, msg->data[1],
4314			 msg->rsp[0] >> 2, msg->rsp[1]);
4315
4316		/* Generate an error response for the message. */
4317		msg->rsp[0] = msg->data[0] | (1 << 2);
4318		msg->rsp[1] = msg->data[1];
4319		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4320		msg->rsp_size = 3;
4321	}
4322
4323	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4324	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4325	    && (msg->user_data != NULL)) {
4326		/*
4327		 * It's a response to a response we sent.  For this we
4328		 * deliver a send message response to the user.
4329		 */
4330		struct ipmi_recv_msg *recv_msg = msg->user_data;
4331
4332		requeue = 0;
4333		if (msg->rsp_size < 2)
4334			/* Message is too small to be correct. */
4335			goto out;
4336
4337		chan = msg->data[2] & 0x0f;
4338		if (chan >= IPMI_MAX_CHANNELS)
4339			/* Invalid channel number */
4340			goto out;
4341
4342		if (!recv_msg)
4343			goto out;
4344
4345		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4346		recv_msg->msg.data = recv_msg->msg_data;
4347		recv_msg->msg.data_len = 1;
4348		recv_msg->msg_data[0] = msg->rsp[2];
4349		deliver_local_response(intf, recv_msg);
4350	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4351		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4352		struct ipmi_channel   *chans;
4353
4354		/* It's from the receive queue. */
4355		chan = msg->rsp[3] & 0xf;
4356		if (chan >= IPMI_MAX_CHANNELS) {
4357			/* Invalid channel number */
4358			requeue = 0;
4359			goto out;
4360		}
4361
4362		/*
4363		 * We need to make sure the channels have been initialized.
4364		 * The channel_handler routine will set the "curr_channel"
4365		 * equal to or greater than IPMI_MAX_CHANNELS when all the
4366		 * channels for this interface have been initialized.
4367		 */
4368		if (!intf->channels_ready) {
4369			requeue = 0; /* Throw the message away */
4370			goto out;
4371		}
4372
4373		chans = READ_ONCE(intf->channel_list)->c;
4374
4375		switch (chans[chan].medium) {
4376		case IPMI_CHANNEL_MEDIUM_IPMB:
4377			if (msg->rsp[4] & 0x04) {
4378				/*
4379				 * It's a response, so find the
4380				 * requesting message and send it up.
4381				 */
4382				requeue = handle_ipmb_get_msg_rsp(intf, msg);
4383			} else {
4384				/*
4385				 * It's a command to the SMS from some other
4386				 * entity.  Handle that.
4387				 */
4388				requeue = handle_ipmb_get_msg_cmd(intf, msg);
4389			}
4390			break;
4391
4392		case IPMI_CHANNEL_MEDIUM_8023LAN:
4393		case IPMI_CHANNEL_MEDIUM_ASYNC:
4394			if (msg->rsp[6] & 0x04) {
4395				/*
4396				 * It's a response, so find the
4397				 * requesting message and send it up.
4398				 */
4399				requeue = handle_lan_get_msg_rsp(intf, msg);
4400			} else {
4401				/*
4402				 * It's a command to the SMS from some other
4403				 * entity.  Handle that.
4404				 */
4405				requeue = handle_lan_get_msg_cmd(intf, msg);
4406			}
4407			break;
4408
4409		default:
4410			/* Check for OEM Channels.  Clients had better
4411			   register for these commands. */
4412			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4413			    && (chans[chan].medium
4414				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4415				requeue = handle_oem_get_msg_cmd(intf, msg);
4416			} else {
4417				/*
4418				 * We don't handle the channel type, so just
4419				 * free the message.
4420				 */
4421				requeue = 0;
4422			}
4423		}
4424
4425	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4426		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4427		/* It's an asynchronous event. */
4428		requeue = handle_read_event_rsp(intf, msg);
4429	} else {
4430		/* It's a response from the local BMC. */
4431		requeue = handle_bmc_rsp(intf, msg);
4432	}
4433
4434 out:
4435	return requeue;
4436}
4437
4438/*
4439 * If there are messages in the queue or pretimeouts, handle them.
4440 */
4441static void handle_new_recv_msgs(struct ipmi_smi *intf)
4442{
4443	struct ipmi_smi_msg  *smi_msg;
4444	unsigned long        flags = 0;
4445	int                  rv;
4446	int                  run_to_completion = intf->run_to_completion;
4447
4448	/* See if any waiting messages need to be processed. */
4449	if (!run_to_completion)
4450		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4451	while (!list_empty(&intf->waiting_rcv_msgs)) {
4452		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4453				     struct ipmi_smi_msg, link);
4454		list_del(&smi_msg->link);
4455		if (!run_to_completion)
4456			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4457					       flags);
4458		rv = handle_one_recv_msg(intf, smi_msg);
4459		if (!run_to_completion)
4460			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4461		if (rv > 0) {
4462			/*
4463			 * To preserve message order, quit if we
4464			 * can't handle a message.  Add the message
4465			 * back at the head, this is safe because this
4466			 * tasklet is the only thing that pulls the
4467			 * messages.
4468			 */
4469			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4470			break;
4471		} else {
4472			if (rv == 0)
4473				/* Message handled */
4474				ipmi_free_smi_msg(smi_msg);
4475			/* If rv < 0, fatal error, del but don't free. */
4476		}
4477	}
4478	if (!run_to_completion)
4479		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4480
4481	/*
4482	 * If the pretimout count is non-zero, decrement one from it and
4483	 * deliver pretimeouts to all the users.
4484	 */
4485	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4486		struct ipmi_user *user;
4487		int index;
4488
4489		index = srcu_read_lock(&intf->users_srcu);
4490		list_for_each_entry_rcu(user, &intf->users, link) {
4491			if (user->handler->ipmi_watchdog_pretimeout)
4492				user->handler->ipmi_watchdog_pretimeout(
4493					user->handler_data);
4494		}
4495		srcu_read_unlock(&intf->users_srcu, index);
4496	}
4497}
4498
4499static void smi_recv_tasklet(struct tasklet_struct *t)
4500{
4501	unsigned long flags = 0; /* keep us warning-free. */
4502	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4503	int run_to_completion = intf->run_to_completion;
4504	struct ipmi_smi_msg *newmsg = NULL;
4505
4506	/*
4507	 * Start the next message if available.
4508	 *
4509	 * Do this here, not in the actual receiver, because we may deadlock
4510	 * because the lower layer is allowed to hold locks while calling
4511	 * message delivery.
4512	 */
4513
4514	rcu_read_lock();
4515
4516	if (!run_to_completion)
4517		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4518	if (intf->curr_msg == NULL && !intf->in_shutdown) {
4519		struct list_head *entry = NULL;
4520
4521		/* Pick the high priority queue first. */
4522		if (!list_empty(&intf->hp_xmit_msgs))
4523			entry = intf->hp_xmit_msgs.next;
4524		else if (!list_empty(&intf->xmit_msgs))
4525			entry = intf->xmit_msgs.next;
4526
4527		if (entry) {
4528			list_del(entry);
4529			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4530			intf->curr_msg = newmsg;
4531		}
4532	}
4533
4534	if (!run_to_completion)
4535		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4536	if (newmsg)
4537		intf->handlers->sender(intf->send_info, newmsg);
4538
4539	rcu_read_unlock();
4540
4541	handle_new_recv_msgs(intf);
4542}
4543
4544/* Handle a new message from the lower layer. */
4545void ipmi_smi_msg_received(struct ipmi_smi *intf,
4546			   struct ipmi_smi_msg *msg)
4547{
4548	unsigned long flags = 0; /* keep us warning-free. */
4549	int run_to_completion = intf->run_to_completion;
4550
4551	/*
4552	 * To preserve message order, we keep a queue and deliver from
4553	 * a tasklet.
4554	 */
4555	if (!run_to_completion)
4556		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4557	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4558	if (!run_to_completion)
4559		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4560				       flags);
4561
4562	if (!run_to_completion)
4563		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4564	/*
4565	 * We can get an asynchronous event or receive message in addition
4566	 * to commands we send.
4567	 */
4568	if (msg == intf->curr_msg)
4569		intf->curr_msg = NULL;
4570	if (!run_to_completion)
4571		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4572
4573	if (run_to_completion)
4574		smi_recv_tasklet(&intf->recv_tasklet);
4575	else
4576		tasklet_schedule(&intf->recv_tasklet);
4577}
4578EXPORT_SYMBOL(ipmi_smi_msg_received);
4579
4580void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4581{
4582	if (intf->in_shutdown)
4583		return;
4584
4585	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4586	tasklet_schedule(&intf->recv_tasklet);
4587}
4588EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4589
4590static struct ipmi_smi_msg *
4591smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4592		  unsigned char seq, long seqid)
4593{
4594	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4595	if (!smi_msg)
4596		/*
4597		 * If we can't allocate the message, then just return, we
4598		 * get 4 retries, so this should be ok.
4599		 */
4600		return NULL;
4601
4602	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4603	smi_msg->data_size = recv_msg->msg.data_len;
4604	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4605
4606	pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
4607
4608	return smi_msg;
4609}
4610
4611static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4612			      struct list_head *timeouts,
4613			      unsigned long timeout_period,
4614			      int slot, unsigned long *flags,
4615			      bool *need_timer)
4616{
4617	struct ipmi_recv_msg *msg;
4618
4619	if (intf->in_shutdown)
4620		return;
4621
4622	if (!ent->inuse)
4623		return;
4624
4625	if (timeout_period < ent->timeout) {
4626		ent->timeout -= timeout_period;
4627		*need_timer = true;
4628		return;
4629	}
4630
4631	if (ent->retries_left == 0) {
4632		/* The message has used all its retries. */
4633		ent->inuse = 0;
4634		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4635		msg = ent->recv_msg;
4636		list_add_tail(&msg->link, timeouts);
4637		if (ent->broadcast)
4638			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4639		else if (is_lan_addr(&ent->recv_msg->addr))
4640			ipmi_inc_stat(intf, timed_out_lan_commands);
4641		else
4642			ipmi_inc_stat(intf, timed_out_ipmb_commands);
4643	} else {
4644		struct ipmi_smi_msg *smi_msg;
4645		/* More retries, send again. */
4646
4647		*need_timer = true;
4648
4649		/*
4650		 * Start with the max timer, set to normal timer after
4651		 * the message is sent.
4652		 */
4653		ent->timeout = MAX_MSG_TIMEOUT;
4654		ent->retries_left--;
4655		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4656					    ent->seqid);
4657		if (!smi_msg) {
4658			if (is_lan_addr(&ent->recv_msg->addr))
4659				ipmi_inc_stat(intf,
4660					      dropped_rexmit_lan_commands);
4661			else
4662				ipmi_inc_stat(intf,
4663					      dropped_rexmit_ipmb_commands);
4664			return;
4665		}
4666
4667		spin_unlock_irqrestore(&intf->seq_lock, *flags);
4668
4669		/*
4670		 * Send the new message.  We send with a zero
4671		 * priority.  It timed out, I doubt time is that
4672		 * critical now, and high priority messages are really
4673		 * only for messages to the local MC, which don't get
4674		 * resent.
4675		 */
4676		if (intf->handlers) {
4677			if (is_lan_addr(&ent->recv_msg->addr))
4678				ipmi_inc_stat(intf,
4679					      retransmitted_lan_commands);
4680			else
4681				ipmi_inc_stat(intf,
4682					      retransmitted_ipmb_commands);
4683
4684			smi_send(intf, intf->handlers, smi_msg, 0);
4685		} else
4686			ipmi_free_smi_msg(smi_msg);
4687
4688		spin_lock_irqsave(&intf->seq_lock, *flags);
4689	}
4690}
4691
4692static bool ipmi_timeout_handler(struct ipmi_smi *intf,
4693				 unsigned long timeout_period)
4694{
4695	struct list_head     timeouts;
4696	struct ipmi_recv_msg *msg, *msg2;
4697	unsigned long        flags;
4698	int                  i;
4699	bool                 need_timer = false;
4700
4701	if (!intf->bmc_registered) {
4702		kref_get(&intf->refcount);
4703		if (!schedule_work(&intf->bmc_reg_work)) {
4704			kref_put(&intf->refcount, intf_free);
4705			need_timer = true;
4706		}
4707	}
4708
4709	/*
4710	 * Go through the seq table and find any messages that
4711	 * have timed out, putting them in the timeouts
4712	 * list.
4713	 */
4714	INIT_LIST_HEAD(&timeouts);
4715	spin_lock_irqsave(&intf->seq_lock, flags);
4716	if (intf->ipmb_maintenance_mode_timeout) {
4717		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4718			intf->ipmb_maintenance_mode_timeout = 0;
4719		else
4720			intf->ipmb_maintenance_mode_timeout -= timeout_period;
4721	}
4722	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4723		check_msg_timeout(intf, &intf->seq_table[i],
4724				  &timeouts, timeout_period, i,
4725				  &flags, &need_timer);
4726	spin_unlock_irqrestore(&intf->seq_lock, flags);
4727
4728	list_for_each_entry_safe(msg, msg2, &timeouts, link)
4729		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4730
4731	/*
4732	 * Maintenance mode handling.  Check the timeout
4733	 * optimistically before we claim the lock.  It may
4734	 * mean a timeout gets missed occasionally, but that
4735	 * only means the timeout gets extended by one period
4736	 * in that case.  No big deal, and it avoids the lock
4737	 * most of the time.
4738	 */
4739	if (intf->auto_maintenance_timeout > 0) {
4740		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4741		if (intf->auto_maintenance_timeout > 0) {
4742			intf->auto_maintenance_timeout
4743				-= timeout_period;
4744			if (!intf->maintenance_mode
4745			    && (intf->auto_maintenance_timeout <= 0)) {
4746				intf->maintenance_mode_enable = false;
4747				maintenance_mode_update(intf);
4748			}
4749		}
4750		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4751				       flags);
4752	}
4753
4754	tasklet_schedule(&intf->recv_tasklet);
4755
4756	return need_timer;
4757}
4758
4759static void ipmi_request_event(struct ipmi_smi *intf)
4760{
4761	/* No event requests when in maintenance mode. */
4762	if (intf->maintenance_mode_enable)
4763		return;
4764
4765	if (!intf->in_shutdown)
4766		intf->handlers->request_events(intf->send_info);
4767}
4768
4769static struct timer_list ipmi_timer;
4770
4771static atomic_t stop_operation;
4772
4773static void ipmi_timeout(struct timer_list *unused)
4774{
4775	struct ipmi_smi *intf;
4776	bool need_timer = false;
4777	int index;
4778
4779	if (atomic_read(&stop_operation))
4780		return;
4781
4782	index = srcu_read_lock(&ipmi_interfaces_srcu);
4783	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4784		if (atomic_read(&intf->event_waiters)) {
4785			intf->ticks_to_req_ev--;
4786			if (intf->ticks_to_req_ev == 0) {
4787				ipmi_request_event(intf);
4788				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4789			}
4790			need_timer = true;
4791		}
4792
4793		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4794	}
4795	srcu_read_unlock(&ipmi_interfaces_srcu, index);
4796
4797	if (need_timer)
4798		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4799}
4800
4801static void need_waiter(struct ipmi_smi *intf)
4802{
4803	/* Racy, but worst case we start the timer twice. */
4804	if (!timer_pending(&ipmi_timer))
4805		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4806}
4807
4808static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4809static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4810
4811static void free_smi_msg(struct ipmi_smi_msg *msg)
4812{
4813	atomic_dec(&smi_msg_inuse_count);
4814	/* Try to keep as much stuff out of the panic path as possible. */
4815	if (!oops_in_progress)
4816		kfree(msg);
4817}
4818
4819struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4820{
4821	struct ipmi_smi_msg *rv;
4822	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4823	if (rv) {
4824		rv->done = free_smi_msg;
4825		rv->user_data = NULL;
4826		atomic_inc(&smi_msg_inuse_count);
4827	}
4828	return rv;
4829}
4830EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4831
4832static void free_recv_msg(struct ipmi_recv_msg *msg)
4833{
4834	atomic_dec(&recv_msg_inuse_count);
4835	/* Try to keep as much stuff out of the panic path as possible. */
4836	if (!oops_in_progress)
4837		kfree(msg);
4838}
4839
4840static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4841{
4842	struct ipmi_recv_msg *rv;
4843
4844	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4845	if (rv) {
4846		rv->user = NULL;
4847		rv->done = free_recv_msg;
4848		atomic_inc(&recv_msg_inuse_count);
4849	}
4850	return rv;
4851}
4852
4853void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4854{
4855	if (msg->user && !oops_in_progress)
4856		kref_put(&msg->user->refcount, free_user);
4857	msg->done(msg);
4858}
4859EXPORT_SYMBOL(ipmi_free_recv_msg);
4860
4861static atomic_t panic_done_count = ATOMIC_INIT(0);
4862
4863static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4864{
4865	atomic_dec(&panic_done_count);
4866}
4867
4868static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4869{
4870	atomic_dec(&panic_done_count);
4871}
4872
4873/*
4874 * Inside a panic, send a message and wait for a response.
4875 */
4876static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4877					struct ipmi_addr *addr,
4878					struct kernel_ipmi_msg *msg)
4879{
4880	struct ipmi_smi_msg  smi_msg;
4881	struct ipmi_recv_msg recv_msg;
4882	int rv;
4883
4884	smi_msg.done = dummy_smi_done_handler;
4885	recv_msg.done = dummy_recv_done_handler;
4886	atomic_add(2, &panic_done_count);
4887	rv = i_ipmi_request(NULL,
4888			    intf,
4889			    addr,
4890			    0,
4891			    msg,
4892			    intf,
4893			    &smi_msg,
4894			    &recv_msg,
4895			    0,
4896			    intf->addrinfo[0].address,
4897			    intf->addrinfo[0].lun,
4898			    0, 1); /* Don't retry, and don't wait. */
4899	if (rv)
4900		atomic_sub(2, &panic_done_count);
4901	else if (intf->handlers->flush_messages)
4902		intf->handlers->flush_messages(intf->send_info);
4903
4904	while (atomic_read(&panic_done_count) != 0)
4905		ipmi_poll(intf);
4906}
4907
4908static void event_receiver_fetcher(struct ipmi_smi *intf,
4909				   struct ipmi_recv_msg *msg)
4910{
4911	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4912	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4913	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4914	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4915		/* A get event receiver command, save it. */
4916		intf->event_receiver = msg->msg.data[1];
4917		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4918	}
4919}
4920
4921static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4922{
4923	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4924	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4925	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4926	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4927		/*
4928		 * A get device id command, save if we are an event
4929		 * receiver or generator.
4930		 */
4931		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4932		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4933	}
4934}
4935
4936static void send_panic_events(struct ipmi_smi *intf, char *str)
4937{
4938	struct kernel_ipmi_msg msg;
4939	unsigned char data[16];
4940	struct ipmi_system_interface_addr *si;
4941	struct ipmi_addr addr;
4942	char *p = str;
4943	struct ipmi_ipmb_addr *ipmb;
4944	int j;
4945
4946	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4947		return;
4948
4949	si = (struct ipmi_system_interface_addr *) &addr;
4950	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4951	si->channel = IPMI_BMC_CHANNEL;
4952	si->lun = 0;
4953
4954	/* Fill in an event telling that we have failed. */
4955	msg.netfn = 0x04; /* Sensor or Event. */
4956	msg.cmd = 2; /* Platform event command. */
4957	msg.data = data;
4958	msg.data_len = 8;
4959	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4960	data[1] = 0x03; /* This is for IPMI 1.0. */
4961	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4962	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4963	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4964
4965	/*
4966	 * Put a few breadcrumbs in.  Hopefully later we can add more things
4967	 * to make the panic events more useful.
4968	 */
4969	if (str) {
4970		data[3] = str[0];
4971		data[6] = str[1];
4972		data[7] = str[2];
4973	}
4974
4975	/* Send the event announcing the panic. */
4976	ipmi_panic_request_and_wait(intf, &addr, &msg);
4977
4978	/*
4979	 * On every interface, dump a bunch of OEM event holding the
4980	 * string.
4981	 */
4982	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4983		return;
4984
4985	/*
4986	 * intf_num is used as an marker to tell if the
4987	 * interface is valid.  Thus we need a read barrier to
4988	 * make sure data fetched before checking intf_num
4989	 * won't be used.
4990	 */
4991	smp_rmb();
4992
4993	/*
4994	 * First job here is to figure out where to send the
4995	 * OEM events.  There's no way in IPMI to send OEM
4996	 * events using an event send command, so we have to
4997	 * find the SEL to put them in and stick them in
4998	 * there.
4999	 */
5000
5001	/* Get capabilities from the get device id. */
5002	intf->local_sel_device = 0;
5003	intf->local_event_generator = 0;
5004	intf->event_receiver = 0;
5005
5006	/* Request the device info from the local MC. */
5007	msg.netfn = IPMI_NETFN_APP_REQUEST;
5008	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
5009	msg.data = NULL;
5010	msg.data_len = 0;
5011	intf->null_user_handler = device_id_fetcher;
5012	ipmi_panic_request_and_wait(intf, &addr, &msg);
5013
5014	if (intf->local_event_generator) {
5015		/* Request the event receiver from the local MC. */
5016		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
5017		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
5018		msg.data = NULL;
5019		msg.data_len = 0;
5020		intf->null_user_handler = event_receiver_fetcher;
5021		ipmi_panic_request_and_wait(intf, &addr, &msg);
5022	}
5023	intf->null_user_handler = NULL;
5024
5025	/*
5026	 * Validate the event receiver.  The low bit must not
5027	 * be 1 (it must be a valid IPMB address), it cannot
5028	 * be zero, and it must not be my address.
5029	 */
5030	if (((intf->event_receiver & 1) == 0)
5031	    && (intf->event_receiver != 0)
5032	    && (intf->event_receiver != intf->addrinfo[0].address)) {
5033		/*
5034		 * The event receiver is valid, send an IPMB
5035		 * message.
5036		 */
5037		ipmb = (struct ipmi_ipmb_addr *) &addr;
5038		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5039		ipmb->channel = 0; /* FIXME - is this right? */
5040		ipmb->lun = intf->event_receiver_lun;
5041		ipmb->slave_addr = intf->event_receiver;
5042	} else if (intf->local_sel_device) {
5043		/*
5044		 * The event receiver was not valid (or was
5045		 * me), but I am an SEL device, just dump it
5046		 * in my SEL.
5047		 */
5048		si = (struct ipmi_system_interface_addr *) &addr;
5049		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5050		si->channel = IPMI_BMC_CHANNEL;
5051		si->lun = 0;
5052	} else
5053		return; /* No where to send the event. */
5054
5055	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5056	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5057	msg.data = data;
5058	msg.data_len = 16;
5059
5060	j = 0;
5061	while (*p) {
5062		int size = strlen(p);
5063
5064		if (size > 11)
5065			size = 11;
5066		data[0] = 0;
5067		data[1] = 0;
5068		data[2] = 0xf0; /* OEM event without timestamp. */
5069		data[3] = intf->addrinfo[0].address;
5070		data[4] = j++; /* sequence # */
5071		/*
5072		 * Always give 11 bytes, so strncpy will fill
5073		 * it with zeroes for me.
5074		 */
5075		strncpy(data+5, p, 11);
5076		p += size;
5077
5078		ipmi_panic_request_and_wait(intf, &addr, &msg);
5079	}
5080}
5081
5082static int has_panicked;
5083
5084static int panic_event(struct notifier_block *this,
5085		       unsigned long         event,
5086		       void                  *ptr)
5087{
5088	struct ipmi_smi *intf;
5089	struct ipmi_user *user;
5090
5091	if (has_panicked)
5092		return NOTIFY_DONE;
5093	has_panicked = 1;
5094
5095	/* For every registered interface, set it to run to completion. */
5096	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5097		if (!intf->handlers || intf->intf_num == -1)
5098			/* Interface is not ready. */
5099			continue;
5100
5101		if (!intf->handlers->poll)
5102			continue;
5103
5104		/*
5105		 * If we were interrupted while locking xmit_msgs_lock or
5106		 * waiting_rcv_msgs_lock, the corresponding list may be
5107		 * corrupted.  In this case, drop items on the list for
5108		 * the safety.
5109		 */
5110		if (!spin_trylock(&intf->xmit_msgs_lock)) {
5111			INIT_LIST_HEAD(&intf->xmit_msgs);
5112			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5113		} else
5114			spin_unlock(&intf->xmit_msgs_lock);
5115
5116		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5117			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5118		else
5119			spin_unlock(&intf->waiting_rcv_msgs_lock);
5120
5121		intf->run_to_completion = 1;
5122		if (intf->handlers->set_run_to_completion)
5123			intf->handlers->set_run_to_completion(intf->send_info,
5124							      1);
5125
5126		list_for_each_entry_rcu(user, &intf->users, link) {
5127			if (user->handler->ipmi_panic_handler)
5128				user->handler->ipmi_panic_handler(
5129					user->handler_data);
5130		}
5131
5132		send_panic_events(intf, ptr);
5133	}
5134
5135	return NOTIFY_DONE;
5136}
5137
5138/* Must be called with ipmi_interfaces_mutex held. */
5139static int ipmi_register_driver(void)
5140{
5141	int rv;
5142
5143	if (drvregistered)
5144		return 0;
5145
5146	rv = driver_register(&ipmidriver.driver);
5147	if (rv)
5148		pr_err("Could not register IPMI driver\n");
5149	else
5150		drvregistered = true;
5151	return rv;
5152}
5153
5154static struct notifier_block panic_block = {
5155	.notifier_call	= panic_event,
5156	.next		= NULL,
5157	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
5158};
5159
5160static int ipmi_init_msghandler(void)
5161{
5162	int rv;
5163
5164	mutex_lock(&ipmi_interfaces_mutex);
5165	rv = ipmi_register_driver();
5166	if (rv)
5167		goto out;
5168	if (initialized)
5169		goto out;
5170
5171	rv = init_srcu_struct(&ipmi_interfaces_srcu);
5172	if (rv)
5173		goto out;
5174
5175	remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
5176	if (!remove_work_wq) {
5177		pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
5178		rv = -ENOMEM;
5179		goto out_wq;
5180	}
5181
5182	timer_setup(&ipmi_timer, ipmi_timeout, 0);
5183	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5184
5185	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5186
5187	initialized = true;
5188
5189out_wq:
5190	if (rv)
5191		cleanup_srcu_struct(&ipmi_interfaces_srcu);
5192out:
5193	mutex_unlock(&ipmi_interfaces_mutex);
5194	return rv;
5195}
5196
5197static int __init ipmi_init_msghandler_mod(void)
5198{
5199	int rv;
5200
5201	pr_info("version " IPMI_DRIVER_VERSION "\n");
5202
5203	mutex_lock(&ipmi_interfaces_mutex);
5204	rv = ipmi_register_driver();
5205	mutex_unlock(&ipmi_interfaces_mutex);
5206
5207	return rv;
5208}
5209
5210static void __exit cleanup_ipmi(void)
5211{
5212	int count;
5213
5214	if (initialized) {
5215		destroy_workqueue(remove_work_wq);
5216
5217		atomic_notifier_chain_unregister(&panic_notifier_list,
5218						 &panic_block);
5219
5220		/*
5221		 * This can't be called if any interfaces exist, so no worry
5222		 * about shutting down the interfaces.
5223		 */
5224
5225		/*
5226		 * Tell the timer to stop, then wait for it to stop.  This
5227		 * avoids problems with race conditions removing the timer
5228		 * here.
5229		 */
5230		atomic_set(&stop_operation, 1);
5231		del_timer_sync(&ipmi_timer);
5232
5233		initialized = false;
5234
5235		/* Check for buffer leaks. */
5236		count = atomic_read(&smi_msg_inuse_count);
5237		if (count != 0)
5238			pr_warn("SMI message count %d at exit\n", count);
5239		count = atomic_read(&recv_msg_inuse_count);
5240		if (count != 0)
5241			pr_warn("recv message count %d at exit\n", count);
5242
5243		cleanup_srcu_struct(&ipmi_interfaces_srcu);
5244	}
5245	if (drvregistered)
5246		driver_unregister(&ipmidriver.driver);
5247}
5248module_exit(cleanup_ipmi);
5249
5250module_init(ipmi_init_msghandler_mod);
5251MODULE_LICENSE("GPL");
5252MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5253MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5254		   " interface.");
5255MODULE_VERSION(IPMI_DRIVER_VERSION);
5256MODULE_SOFTDEP("post: ipmi_devintf");
5257