// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

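/*
 * Per-open-file state for the IPMI character device.  Received messages
 * are queued on recv_msgs under recv_msg_lock until userspace collects
 * them; recv_mutex serializes message retrieval and also protects the
 * default retry settings below.
 */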
struct ipmi_file_private {
	struct ipmi_user     *user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex	     recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};

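/*
 * Receive callback registered with the message handler: queue the
 * message on the per-file list and, if the list was previously empty,
 * wake any poll()/read waiters and notify fasync subscribers.
 */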
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	was_empty = list_empty(&priv->recv_msgs);
	list_add_tail(&msg->link, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}
}

static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	__poll_t                 mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&priv->recv_msgs))
		mask |= (EPOLLIN | EPOLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;

	return fasync_helper(fd, file, on, &priv->fasync_queue);
}

static const struct ipmi_user_hndl ipmi_hndlrs = {
	.ipmi_recv_hndl	= file_receive_handler,
};

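/*
 * open() creates an IPMI user on the interface selected by the device
 * minor number and hangs the per-file state off file->private_data.
 */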
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &priv->user);
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&priv->recv_msg_lock);
	INIT_LIST_HEAD(&priv->recv_msgs);
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;
	struct ipmi_recv_msg *msg, *next;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}

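/*
 * Copy a request (address and optional message data) in from userspace
 * and submit it to the message handler via ipmi_request_settime().
 */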
static int handle_send_req(struct ipmi_user *user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int              rv;
	struct ipmi_addr addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/*
	 * From here on we cannot simply return; error exits must jump
	 * to "out" so that msg.data gets freed.
	 */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len)) {
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}

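/*
 * Pull the oldest received message off the per-file queue and copy it
 * out to userspace.  On failure the message is put back on the head of
 * the queue so it is not lost.
 */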
static int handle_recv(struct ipmi_file_private *priv,
			bool trunc, struct ipmi_recv *rsp,
			int (*copyout)(struct ipmi_recv *, void __user *),
			void __user *to)
{
	int              addr_len;
	struct list_head *entry;
	struct ipmi_recv_msg  *msg;
	unsigned long    flags;
	int rv = 0, rv2 = 0;

	/*
	 * We claim a mutex because we don't want two users getting
	 * something from the queue at a time.  Since we have to release
	 * the spinlock before we can copy the data to the user, it's
	 * possible another user will grab something from the queue, too.
	 * Then the messages might get out of order if something fails
	 * and the message gets put back onto the queue.  This mutex
	 * prevents that problem.
	 */
	mutex_lock(&priv->recv_mutex);

	/* Grab the message off the list. */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	if (list_empty(&(priv->recv_msgs))) {
		spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
		rv = -EAGAIN;
		goto recv_err;
	}
	entry = priv->recv_msgs.next;
	msg = list_entry(entry, struct ipmi_recv_msg, link);
	list_del(entry);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	addr_len = ipmi_addr_length(msg->addr.addr_type);
	if (rsp->addr_len < addr_len) {
		rv = -EINVAL;
		goto recv_putback_on_err;
	}

	if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
		rv = -EFAULT;
		goto recv_putback_on_err;
	}
	rsp->addr_len = addr_len;

	rsp->recv_type = msg->recv_type;
	rsp->msgid = msg->msgid;
	rsp->msg.netfn = msg->msg.netfn;
	rsp->msg.cmd = msg->msg.cmd;

	if (msg->msg.data_len > 0) {
		if (rsp->msg.data_len < msg->msg.data_len) {
			rv2 = -EMSGSIZE;
			if (trunc)
				msg->msg.data_len = rsp->msg.data_len;
			else
				goto recv_putback_on_err;
		}

		if (copy_to_user(rsp->msg.data,
				 msg->msg.data,
				 msg->msg.data_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp->msg.data_len = msg->msg.data_len;
	} else {
		rsp->msg.data_len = 0;
	}

	rv = copyout(rsp, to);
	if (rv)
		goto recv_putback_on_err;

	mutex_unlock(&priv->recv_mutex);
	ipmi_free_recv_msg(msg);
	return rv2;

recv_putback_on_err:
	/*
	 * If we got an error, put the message back onto the head of the
	 * queue.
	 */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	list_add(entry, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
recv_err:
	mutex_unlock(&priv->recv_mutex);
	return rv;
}

static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}

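/*
 * The ioctl handler below is the main userspace entry point.  As an
 * illustrative sketch only (not taken from this file), a userspace
 * caller might issue a Get Device ID request roughly like this,
 * assuming the usual <linux/ipmi.h> definitions and a /dev/ipmi0 node:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ipmi.h>
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *	};
 *	struct ipmi_req req = {
 *		.addr     = (unsigned char *) &si,
 *		.addr_len = sizeof(si),
 *		.msgid    = 1,
 *		.msg      = { .netfn = 0x06, .cmd = 0x01,
 *			      .data = NULL, .data_len = 0 },
 *	};
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *
 * The reply is later collected with IPMICTL_RECEIVE_MSG (or the _TRUNC
 * variant), typically after poll() reports EPOLLIN.
 */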
static long ipmi_ioctl(struct file   *file,
		       unsigned int  cmd,
		       unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd) {
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv      rsp;

		if (copy_from_user(&rsp, arg, sizeof(rsp)))
			rv = -EFAULT;
		else
			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
					 &rsp, copyout_recv, arg);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec   val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		mutex_unlock(&priv->recv_mutex);
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		mutex_lock(&priv->recv_mutex);
		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}

	default:
		rv = -ENOTTY;
		break;
	}

	return rv;
}

#ifdef CONFIG_COMPAT
/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels, allowing 32-bit applications to run against the 64-bit
 * kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data.
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg *p32)
{
	p64->netfn = p32->netfn;
	p64->cmd = p32->cmd;
	p64->data_len = p32->data_len;
	p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req *p32)
{
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
		struct compat_ipmi_req_settime *p32)
{
	get_compat_ipmi_req(&p64->req, &p32->req);
	p64->retries = p32->retries;
	p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv *p32)
{
	memset(p64, 0, sizeof(struct ipmi_recv));
	p64->recv_type = p32->recv_type;
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
	struct compat_ipmi_recv v32;

	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
	v32.recv_type = p64->recv_type;
	v32.addr = ptr_to_compat(p64->addr);
	v32.addr_len = p64->addr_len;
	v32.msgid = p64->msgid;
	v32.msg.netfn = p64->msg.netfn;
	v32.msg.cmd = p64->msg.cmd;
	v32.msg.data_len = p64->msg.data_len;
	v32.msg.data = ptr_to_compat(p64->msg.data);

	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}

/*
 * Handle compatibility ioctls.
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	struct ipmi_file_private *priv = filep->private_data;

	switch (cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req	rp;
		struct compat_ipmi_req r32;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
			return -EFAULT;

		get_compat_ipmi_req(&rp, &r32);

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		return handle_send_req(priv->user, &rp,
				       retries, retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime	sp;
		struct compat_ipmi_req_settime sp32;

		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
			return -EFAULT;

		get_compat_ipmi_req_settime(&sp, &sp32);

		return handle_send_req(priv->user, &sp.req,
				sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv   recv64;
		struct compat_ipmi_recv recv32;

		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
			return -EFAULT;

		get_compat_ipmi_recv(&recv64, &recv32);

		return handle_recv(priv,
				 cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
				 &recv64, copyout_recv32, compat_ptr(arg));
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}
#endif

static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, the next available major"
		 " number will be chosen.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");

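/*
 * Illustrative usage only (the values are examples, not from this file):
 * loading the module as "modprobe ipmi_devintf ipmi_major=0", or leaving
 * the parameter unset, lets the kernel pick a free major number, while
 * e.g. "ipmi_major=250" requests that specific major.
 */
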
/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

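/*
 * Interface hotplug callbacks: create or remove the ipmi%d class device
 * node when the message handler reports an interface coming or going,
 * tracking each created node on reg_list.
 */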
static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher = {
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	pr_info("ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		pr_err("ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		pr_err("ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0)
		ipmi_major = rv;

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		pr_warn("ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");