#include <linux/slab.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ipmi.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
struct file *file;
struct fasync_struct *fasync_queue;
wait_queue_head_t wait;
- struct semaphore recv_sem;
+ struct mutex recv_mutex;
int default_retries;
unsigned int default_retry_time_ms;
};
INIT_LIST_HEAD(&(priv->recv_msgs));
init_waitqueue_head(&priv->wait);
priv->fasync_queue = NULL;
- sema_init(&(priv->recv_sem), 1);
+ mutex_init(&priv->recv_mutex);
/* Use the low-level defaults. */
priv->default_retries = -1;
break;
}
- /* We claim a semaphore because we don't want two
+ /* We claim a mutex because we don't want two
users getting something from the queue at a time.
Since we have to release the spinlock before we can
copy the data to the user, it's possible another
user will grab something from the queue, too. Then
the messages might get out of order if something
fails and the message gets put back onto the
- queue. This semaphore prevents that problem. */
- down(&(priv->recv_sem));
+ queue. This mutex prevents that problem. */
+ mutex_lock(&priv->recv_mutex);
/* Grab the message off the list. */
spin_lock_irqsave(&(priv->recv_msg_lock), flags);
goto recv_putback_on_err;
}
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
ipmi_free_recv_msg(msg);
break;
spin_lock_irqsave(&(priv->recv_msg_lock), flags);
list_add(entry, &(priv->recv_msgs));
spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
break;
recv_err:
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
break;
}
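A minimal sketch of the dequeue pattern the comment above describes, using hypothetical names (my_dev, my_recv, copy_msg_to_user) rather than the driver's own code: the mutex serializes readers end to end, while the spinlock only covers the list manipulation itself, so a message put back after a failed copy keeps its place at the head of the queue.

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

struct my_dev {
	struct mutex     recv_mutex;    /* one reader in the dequeue path at a time */
	spinlock_t       recv_msg_lock; /* protects recv_msgs only */
	struct list_head recv_msgs;     /* set up with mutex_init()/spin_lock_init()/INIT_LIST_HEAD() */
};

static int copy_msg_to_user(struct list_head *entry, void __user *buf); /* hypothetical helper */

static int my_recv(struct my_dev *dev, void __user *buf)
{
	struct list_head *entry;
	unsigned long flags;
	int rv;

	mutex_lock(&dev->recv_mutex);

	spin_lock_irqsave(&dev->recv_msg_lock, flags);
	if (list_empty(&dev->recv_msgs)) {
		spin_unlock_irqrestore(&dev->recv_msg_lock, flags);
		rv = -EAGAIN;
		goto out;
	}
	entry = dev->recv_msgs.next;
	list_del(entry);
	spin_unlock_irqrestore(&dev->recv_msg_lock, flags);

	rv = copy_msg_to_user(entry, buf);      /* may sleep, so not under the spinlock */
	if (rv) {
		/* Put the message back at the head; the mutex guarantees no
		   other reader slipped in, so ordering is preserved. */
		spin_lock_irqsave(&dev->recv_msg_lock, flags);
		list_add(entry, &dev->recv_msgs);
		spin_unlock_irqrestore(&dev->recv_msg_lock, flags);
	}
out:
	mutex_unlock(&dev->recv_mutex);
	return rv;
}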
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
/* The list of command receivers that are registered for commands
on this interface. */
- struct semaphore cmd_rcvrs_lock;
+ struct mutex cmd_rcvrs_mutex;
struct list_head cmd_rcvrs;
-/* Events that were queues because no one was there to receive
+/* Events that were queued because no one was there to receive
/* Wholesale remove all the entries from the list in the
* interface and wait for RCU to know that none are in use. */
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
list_add_rcu(&list, &intf->cmd_rcvrs);
list_del_rcu(&intf->cmd_rcvrs);
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
list_for_each_entry_safe(rcvr, rcvr2, &list, link)
* since other things may be using it till we do
* synchronize_rcu()) then free everything in that list.
*/
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
rcvrs = rcvr;
}
}
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvr->netfn = netfn;
rcvr->user = user;
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
/* Make sure the command/netfn is not already registered. */
entry = find_cmd_rcvr(intf, netfn, cmd);
if (entry) {
list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
out_unlock:
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
if (rv)
kfree(rcvr);
ipmi_smi_t intf = user->intf;
struct cmd_rcvr *rcvr;
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
/* Make sure the command/netfn is not already registered. */
rcvr = find_cmd_rcvr(intf, netfn, cmd);
if ((rcvr) && (rcvr->user == user)) {
list_del_rcu(&rcvr->link);
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
kfree(rcvr);
return 0;
} else {
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
return -ENOENT;
}
}
spin_lock_init(&intf->events_lock);
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
- init_MUTEX(&intf->cmd_rcvrs_lock);
+ mutex_init(&intf->cmd_rcvrs_mutex);
INIT_LIST_HEAD(&intf->cmd_rcvrs);
init_waitqueue_head(&intf->waitq);
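The cmd_rcvrs changes above follow the usual RCU list pattern: readers traverse the list under rcu_read_lock(), writers serialize on the new mutex, and an entry is only kfree()d after synchronize_rcu(), once no reader can still hold a pointer to it. A rough sketch of the unregister side with hypothetical names (my_intf, my_rcvr, my_unregister), not the message handler's exact code:

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_rcvr {
	struct list_head link;
	unsigned char    netfn;
	unsigned char    cmd;
};

struct my_intf {
	struct mutex     cmd_rcvrs_mutex; /* serializes writers only */
	struct list_head cmd_rcvrs;       /* readers use list_for_each_entry_rcu() */
};

static int my_unregister(struct my_intf *intf, unsigned char netfn,
			 unsigned char cmd)
{
	struct my_rcvr *rcvr;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->netfn == netfn && rcvr->cmd == cmd) {
			list_del_rcu(&rcvr->link);
			mutex_unlock(&intf->cmd_rcvrs_mutex);
			synchronize_rcu();      /* wait out readers still in flight */
			kfree(rcvr);
			return 0;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	return -ENOENT;
}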
#define SI_MAX_PARMS 4
static LIST_HEAD(smi_infos);
-static DECLARE_MUTEX(smi_infos_lock);
+static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
new_smi->slave_addr, new_smi->irq);
}
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
printk(KERN_WARNING "ipmi_si: duplicate interface\n");
rv = -EBUSY;
list_add_tail(&new_smi->link, &smi_infos);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
kfree(new_smi);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
return rv;
}
#endif
if (si_trydefaults) {
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
default_find_bmc();
} else {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
}
}
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
printk("ipmi_si: Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
return 0;
}
}
pci_unregister_driver(&ipmi_pci_driver);
#endif
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
driver_unregister(&ipmi_driver);
}
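For context, DECLARE_MUTEX() actually declared a counting semaphore initialized to 1, with no notion of an owner; DEFINE_MUTEX() gives a real struct mutex that must be unlocked by the task that locked it and gains runtime checks when mutex debugging is enabled. A minimal sketch of the resulting idiom for a file-scope list lock such as smi_infos_lock, using hypothetical names (my_infos, my_infos_lock):

#include <linux/mutex.h>
#include <linux/list.h>

static LIST_HEAD(my_infos);           /* list of discovered interfaces */
static DEFINE_MUTEX(my_infos_lock);   /* statically initialized mutex */

static int my_count_infos(void)
{
	struct list_head *e;
	int n = 0;

	mutex_lock(&my_infos_lock);   /* may sleep; never take from IRQ context */
	list_for_each(e, &my_infos)
		n++;
	mutex_unlock(&my_infos_lock);
	return n;
}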
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
+#include <linux/completion.h>
#include <linux/rwsem.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
static void panic_halt_ipmi_heartbeat(void);
-/* We use a semaphore to make sure that only one thing can send a set
+/* We use a mutex to make sure that only one thing can send a set
timeout at one time, because we only have one copy of the data.
- The semaphore is claimed when the set_timeout is sent and freed
- when both messages are free. */
+ The mutex is held while a set_timeout is in progress; the completion
+ fires once both messages have been freed. */
static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
-static DECLARE_MUTEX(set_timeout_lock);
+static DEFINE_MUTEX(set_timeout_lock);
+static DECLARE_COMPLETION(set_timeout_wait);
static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
{
if (atomic_dec_and_test(&set_timeout_tofree))
- up(&set_timeout_lock);
+ complete(&set_timeout_wait);
}
static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
{
if (atomic_dec_and_test(&set_timeout_tofree))
- up(&set_timeout_lock);
+ complete(&set_timeout_wait);
}
static struct ipmi_smi_msg set_timeout_smi_msg =
{
/* We can only send one of these at a time. */
- down(&set_timeout_lock);
+ mutex_lock(&set_timeout_lock);
atomic_set(&set_timeout_tofree, 2);
&set_timeout_recv_msg,
&send_heartbeat_now);
if (rv) {
- up(&set_timeout_lock);
- } else {
- if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
- || ((send_heartbeat_now)
- && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
- {
- rv = ipmi_heartbeat();
- }
+ mutex_unlock(&set_timeout_lock);
+ goto out;
+ }
+
+ wait_for_completion(&set_timeout_wait);
+
+ if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+ || ((send_heartbeat_now)
+ && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ {
+ rv = ipmi_heartbeat();
}
+ mutex_unlock(&set_timeout_lock);
+out:
return rv;
}
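The set_timeout path above combines three pieces: the mutex keeps only one operation in flight, the atomic count tracks the two outstanding messages (one SMI message and one receive message), and the completion lets the caller sleep until both free callbacks have run, after which it is safe to drop the mutex and reuse the static message buffers. A simplified sketch of that shape with hypothetical names (op_lock, op_done, msgs_tofree, send_request), not the watchdog's exact code:

#include <linux/mutex.h>
#include <linux/completion.h>
#include <asm/atomic.h>

static atomic_t msgs_tofree = ATOMIC_INIT(0);
static DEFINE_MUTEX(op_lock);
static DECLARE_COMPLETION(op_done);

/* In the real driver this would be each message's free/done hook. */
static void msg_freed(void)
{
	/* Whichever callback drops the count to zero wakes the waiter. */
	if (atomic_dec_and_test(&msgs_tofree))
		complete(&op_done);
}

/* Hypothetical stand-in for queueing both messages to the BMC. */
static int send_request(void)
{
	return 0;
}

static int do_op(void)
{
	int rv;

	mutex_lock(&op_lock);           /* one operation in flight at a time */
	atomic_set(&msgs_tofree, 2);    /* one SMI message + one receive message */

	rv = send_request();
	if (rv) {
		mutex_unlock(&op_lock);
		return rv;
	}

	wait_for_completion(&op_done);  /* sleep until both messages are freed */
	mutex_unlock(&op_lock);
	return 0;
}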
- The semaphore is claimed when the set_timeout is sent and freed
- when both messages are free. */
+ The mutex is held while a heartbeat is outstanding; the completion
+ fires once both messages have been freed. */
static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
-static DECLARE_MUTEX(heartbeat_lock);
-static DECLARE_MUTEX_LOCKED(heartbeat_wait_lock);
+static DEFINE_MUTEX(heartbeat_lock);
+static DECLARE_COMPLETION(heartbeat_wait);
static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
{
if (atomic_dec_and_test(&heartbeat_tofree))
- up(&heartbeat_wait_lock);
+ complete(&heartbeat_wait);
}
static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
{
if (atomic_dec_and_test(&heartbeat_tofree))
- up(&heartbeat_wait_lock);
+ complete(&heartbeat_wait);
}
static struct ipmi_smi_msg heartbeat_smi_msg =
{
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
}
- down(&heartbeat_lock);
+ mutex_lock(&heartbeat_lock);
atomic_set(&heartbeat_tofree, 2);
/* Don't reset the timer if we have the timer turned off, that
re-enables the watchdog. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
return 0;
}
&heartbeat_recv_msg,
1);
if (rv) {
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
printk(KERN_WARNING PFX "heartbeat failure: %d\n",
rv);
return rv;
}
/* Wait for the heartbeat to be sent. */
- down(&heartbeat_wait_lock);
+ wait_for_completion(&heartbeat_wait);
if (heartbeat_recv_msg.msg.data[0] != 0) {
/* Got an error in the heartbeat response. It was already
rv = -EINVAL;
}
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
return rv;
}
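One detail worth noting: heartbeat_wait_lock could not simply become another mutex. DECLARE_MUTEX_LOCKED() created a semaphore that starts out held and is released from a different context (the message free callbacks) than the one that sleeps on it, and a struct mutex must be unlocked by its owner. The idiomatic replacement for that wait-for-event usage is a completion, as above; a minimal sketch with hypothetical names (event_done, event_handler, wait_for_event):

#include <linux/completion.h>

static DECLARE_COMPLETION(event_done);

/* Runs in some other context (message callback, interrupt, ...). */
static void event_handler(void)
{
	complete(&event_done);            /* old code: up(&event_wait_lock) */
}

static void wait_for_event(void)
{
	wait_for_completion(&event_done); /* old code: down(&event_wait_lock) */
}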