return i ? 0 : -ENOMEM;
}
-static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
+static void rx_refill_atomic(struct net_device *ndev);
static void fastcall rx_refill_atomic(struct net_device *ndev)
{
rx_refill(ndev, GFP_ATOMIC);
build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0);
}
-static void FASTCALL(phy_intr(struct net_device *ndev));
+static void phy_intr(struct net_device *ndev);
static void fastcall phy_intr(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
}
}
-static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
+static void ns83820_rx_kick(struct net_device *ndev);
static void fastcall ns83820_rx_kick(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
/* rx_irq
*
*/
-static void FASTCALL(rx_irq(struct net_device *ndev));
+static void rx_irq(struct net_device *ndev);
static void fastcall rx_irq(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
rfcomm_dlc_free(d);
}
-extern void FASTCALL(__rfcomm_dlc_throttle(struct rfcomm_dlc *d));
-extern void FASTCALL(__rfcomm_dlc_unthrottle(struct rfcomm_dlc *d));
+extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
{
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
-extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
+extern void lock_sock_nested(struct sock *sk, int subclass);
static inline void lock_sock(struct sock *sk)
{
lock_sock_nested(sk, 0);
}
-extern void FASTCALL(release_sock(struct sock *sk));
+extern void release_sock(struct sock *sk);
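/*
 * Usage sketch (not part of this patch): lock_sock() and release_sock()
 * pair up in process context. lock_sock() may sleep and ends up calling
 * lock_sock_nested(sk, 0); release_sock() also runs the socket backlog
 * before dropping the lock. The helper name and the field it touches are
 * illustrative only.
 */
#include <net/sock.h>

static int example_set_rcvlowat(struct sock *sk, int val)	/* hypothetical */
{
	lock_sock(sk);			/* process context, may sleep */
	sk->sk_rcvlowat = val;		/* socket state is safe to touch here */
	release_sock(sk);		/* processes the backlog, then unlocks */
	return 0;
}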
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
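/*
 * Usage sketch (not part of this patch), continuing the example above:
 * BH/softirq context cannot sleep, so it takes sk_lock.slock via
 * bh_lock_sock()/bh_unlock_sock() instead of lock_sock(). The handler
 * name is illustrative only.
 */
static void example_bh_handler(struct sock *sk)		/* hypothetical */
{
	bh_lock_sock(sk);		/* plain spin_lock, no sleeping allowed */
	if (!sock_owned_by_user(sk)) {
		/* process context does not own the lock: safe to work here */
	} else {
		/* otherwise defer the work, e.g. via the socket backlog */
	}
	bh_unlock_sock(sk);
}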