Diffstat (limited to 'net')
29 files changed, 148 insertions, 125 deletions
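
The changes below apply the tree-wide workqueue API rework to net/: work handlers now take a struct work_struct * instead of a caller-supplied void *, each handler recovers its containing object with container_of(), and work items that are ever scheduled with a timeout move from struct work_struct to struct delayed_work (old schedule_work() calls on such items become zero-delay schedule_delayed_work() calls). The sketch below is illustrative only and not part of the commit; struct my_dev and its members are hypothetical stand-ins for the structures converted in the diff.

#include <linux/workqueue.h>

struct my_dev {
	int			state;
	struct work_struct	reset_work;	/* immediate work item */
	struct delayed_work	poll_work;	/* timed work item */
};

/* New-style handler: receives the work_struct itself; the owning
 * object is recovered with container_of(). */
static void my_dev_reset(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, reset_work);

	dev->state = 0;
}

/* For delayed work, container_of() goes through the work_struct
 * embedded in struct delayed_work (its .work member). */
static void my_dev_poll(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  poll_work.work);

	dev->state++;
}

static void my_dev_init(struct my_dev *dev)
{
	/* old API: INIT_WORK(&dev->reset_work, my_dev_reset, dev); */
	INIT_WORK(&dev->reset_work, my_dev_reset);
	INIT_DELAYED_WORK(&dev->poll_work, my_dev_poll);

	schedule_work(&dev->reset_work);
	/* former schedule_work() on a delayed item becomes a
	 * zero-delay schedule_delayed_work(), as throughout this diff */
	schedule_delayed_work(&dev->poll_work, 0);
}
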
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5946ec63724..3fc0abeeaf3 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
 
 #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
 
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
 static void lec_arp_expire_arp(unsigned long data);
 
 /*
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv)
 	INIT_HLIST_HEAD(&priv->lec_no_forward);
 	INIT_HLIST_HEAD(&priv->mcast_fwds);
 	spin_lock_init(&priv->lec_arp_lock);
-	INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+	INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
 	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
 }
 
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data)
  * to ESI_FORWARD_DIRECT. This causes the flush period to end
  * regardless of the progress of the flush protocol.
  */
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
 {
 	unsigned long flags;
-	struct lec_priv *priv = data;
+	struct lec_priv *priv =
+		container_of(work, struct lec_priv, lec_arp_work.work);
 	struct hlist_node *node, *next;
 	struct lec_arp_table *entry;
 	unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 24cc95f8674..99136babd53 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@ struct lec_priv {
 	spinlock_t lec_arp_lock;
 	struct atm_vcc *mcast_vcc;		/* Default Multicast Send VCC */
 	struct atm_vcc *lecd;
-	struct work_struct lec_arp_work;	/* C10 */
+	struct delayed_work lec_arp_work;	/* C10 */
 	unsigned int maximum_unknown_frame_count;
 /*
  * Within the period of time defined by this variable, the client will send
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a86e7..d4c935692cc 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
 	kfree(data);
 }
 
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	int i;
 
 	if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
 
 	dev_set_drvdata(&conn->dev, conn);
 
-	INIT_WORK(&conn->work, add_conn, (void *) conn);
+	INIT_WORK(&conn->work, add_conn);
 
 	schedule_work(&conn->work);
 }
 
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	device_del(&conn->dev);
 }
 
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 {
 	BT_DBG("conn %p", conn);
 
-	INIT_WORK(&conn->work, del_conn, (void *) conn);
+	INIT_WORK(&conn->work, del_conn);
 
 	schedule_work(&conn->work);
 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40c11d..55bb2634c08 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
 * Called from work queue to allow for calling functions that
 * might sleep (such as speed check), and to debounce.
 */
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
 {
-	struct net_device *dev = arg;
 	struct net_bridge_port *p;
+	struct net_device *dev;
 	struct net_bridge *br;
 
+	dev = container_of(work, struct net_bridge_port,
+			   carrier_check.work)->dev;
+	work_release(work);
+
 	rtnl_lock();
 	p = dev->br_port;
 	if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	p->port_no = index;
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
-	INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+	INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
 	br_stp_port_timer_init(p);
 
 	kobject_init(&p->kobj);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d86f25..3a534e94c7f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct net_bridge_port
 	struct timer_list		hold_timer;
 	struct timer_list		message_age_timer;
 	struct kobject			kobj;
-	struct work_struct		carrier_check;
+	struct delayed_work		carrier_check;
 	struct rcu_head			rcu;
 };
 
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114744c..549a2ce951b 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@ enum lw_bits {
 static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
 }
 
 
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
 {
 	/* Limit the number of linkwatch events to one
 	 * per second so that a runaway driver does not
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
 		unsigned long delay = linkwatch_nextevent - jiffies;
 
 		/* If we wrap around we'll delay it by at most HZ. */
-		if (!delay || delay > HZ)
-			schedule_work(&linkwatch_work);
-		else
-			schedule_delayed_work(&linkwatch_work, delay);
+		if (delay > HZ)
+			delay = 0;
+		schedule_delayed_work(&linkwatch_work, delay);
 	}
 }
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3c58846fcaa..b3c559b9ac3 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,9 +50,10 @@ static atomic_t trapped;
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
-	struct netpoll_info *npinfo = p;
+	struct netpoll_info *npinfo =
+		container_of(work, struct netpoll_info, tx_work.work);
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@ static void queue_process(void *p)
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-
-		netif_tx_unlock_bh(dev);
 	}
 }
 
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	if (status != NETDEV_TX_OK) {
 		skb_queue_tail(&npinfo->txq, skb);
-		schedule_work(&npinfo->tx_work);
+		schedule_delayed_work(&npinfo->tx_work,0);
 	}
 }
 
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np)
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
 		skb_queue_head_init(&npinfo->txq);
-		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 7b52f2a03ee..4c9e26775f7 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = {
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&dccp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(dccp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &dccp_death_row),
+					     inet_twdr_twkill_work),
/* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87a971..08386c10295 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
 }
 
 void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.timeout.work);
 	struct ieee80211softmac_network *n;
 
 	mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
 
 /* This function is called to handle userspace requests (asynchronously) */
 void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.work.work);
 	struct ieee80211softmac_network *found = NULL;
 	struct ieee80211_network *net = NULL, *best = NULL;
 	int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
 			network->authenticated = 0;
 			/* we don't want to do this more than once ... */
 			network->auth_desynced_once = 1;
-			schedule_work(&mac->associnfo.work);
+			schedule_delayed_work(&mac->associnfo.work, 0);
 			break;
 		}
 		default:
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
 	ieee80211softmac_disassoc(mac);
 
 	/* try to reassociate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
 		dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
 		return 0;
 	}
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 0612015f1c7..6012705aa4f 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
 
 #include "ieee80211softmac_priv.h"
 
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
 
 /* Queues an auth request to the desired AP */
 int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 	auth->mac = mac;
 	auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
 	auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
-	INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+	INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
 
 	/* Lock (for list) */
 	spin_lock_irqsave(&mac->lock, flags);
 
 	/* add to list */
 	list_add_tail(&auth->list, &mac->auth_queue);
-	schedule_work(&auth->work);
+	schedule_delayed_work(&auth->work, 0);
 	spin_unlock_irqrestore(&mac->lock, flags);
 
 	return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 
 /* Sends an auth request to the desired AP and handles timeouts */
 static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
 {
 	struct ieee80211softmac_device *mac;
 	struct ieee80211softmac_auth_queue_item *auth;
 	struct ieee80211softmac_network *net;
 	unsigned long flags;
 
-	auth = (struct ieee80211softmac_auth_queue_item *)data;
+	auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+			    work.work);
 	net = auth->net;
 	mac = auth->mac;
 
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
 
 /* Sends a response to an auth challenge (for shared key auth). */
 static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
 {
-	struct ieee80211softmac_auth_queue_item *aq = _aq;
+	struct ieee80211softmac_auth_queue_item *aq =
+		container_of(work, struct ieee80211softmac_auth_queue_item,
+			     work.work);
 
 	/* Send our response */
 	ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
			 * we have obviously already sent the initial auth
			 * request. */
			cancel_delayed_work(&aq->work);
-			INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
-			schedule_work(&aq->work);
+			INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+			schedule_delayed_work(&aq->work, 0);
			spin_unlock_irqrestore(&mac->lock, flags);
			return 0;
		case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
 	ieee80211softmac_deauth_from_net(mac, net);
 
 	/* let's try to re-associate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2ef666..b9015656cfb 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
 
 
 static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
 {
-	struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
-	kfree(d);
+	struct ieee80211softmac_event *pevent =
+		container_of(work, struct ieee80211softmac_event, work.work);
+	struct ieee80211softmac_event event = *pevent;
+	kfree(pevent);
 
 	event.fun(event.mac->dev, event.event_type, event.context);
 }
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
 		return -ENOMEM;
 
 	eventptr->event_type = event;
-	INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+	INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
 	eventptr->fun = fun;
 	eventptr->context = context;
 	eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
 			/* User may have subscribed to ANY event, so
 			 * we tell them which event triggered it. */
 			eventptr->event_type = event;
-			schedule_work(&eventptr->work);
+			schedule_delayed_work(&eventptr->work, 0);
 		}
 	}
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f4a47..256207b71dc 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
 	INIT_LIST_HEAD(&softmac->events);
 
 	mutex_init(&softmac->associnfo.mutex);
-	INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
-	INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+	INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+	INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
 	softmac->start_scan = ieee80211softmac_start_scan_implementation;
 	softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
 	softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e090b8a..c0dbe070e54 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
/* private definitions and prototypes */
 
/*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
/* for internal use if scanning is needed */
 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
 
/*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
 int ieee80211softmac_handle_assoc_response(struct net_device * dev,
					   struct ieee80211_assoc_response * resp,
					   struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
				     struct ieee80211_disassoc * disassoc);
 int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
					struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
 
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
	struct ieee80211softmac_device	*mac;	/* SoftMAC device */
	u8 retry;				/* Retry limit */
	u8 state;				/* Auth State */
-	struct work_struct		work;	/* Work queue */
+	struct delayed_work		work;	/* Work queue */
 };
 
/* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
	       stop:1;
	u8 skip_flags;
	struct completion finished;
-	struct work_struct softmac_scan;
+	struct delayed_work softmac_scan;
+	struct ieee80211softmac_device *mac;
 };
 
/* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
	struct list_head list;
	int event_type;
	void *event_context;
-	struct work_struct work;
+	struct delayed_work work;
	notify_function_ptr fun;
	void *context;
	struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 5507feab32d..0c85d6c24cd 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
 
 
/* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
 {
	int invalid_channel;
	u8 current_channel_idx;
-	struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
-	struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+	struct ieee80211softmac_scaninfo *si =
+		container_of(work, struct ieee80211softmac_scaninfo,
+			     softmac_scan.work);
+	struct ieee80211softmac_device *sm = si->mac;
	unsigned long flags;
 
	while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
	struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
	if (unlikely(!info))
		return NULL;
-	INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+	INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+	info->mac = mac;
	init_completion(&info->finished);
	return info;
 }
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
	sm->scaninfo->started = 1;
	sm->scaninfo->stop = 0;
	INIT_COMPLETION(sm->scaninfo->finished);
-	schedule_work(&sm->scaninfo->softmac_scan);
+	schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
	spin_unlock_irqrestore(&sm->lock, flags);
	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a830f7..2ffaebd21c5 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
 
	sm->associnfo.associating = 1;
	/* queue lower level code to do work (if necessary) */
-	schedule_work(&sm->associnfo.work);
+	schedule_delayed_work(&sm->associnfo.work, 0);
 out:
	mutex_unlock(&sm->associnfo.mutex);
 
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
		/* force reassociation */
		mac->associnfo.bssvalid = 0;
		if (mac->associnfo.associated)
-			schedule_work(&mac->associnfo.work);
+			schedule_delayed_work(&mac->associnfo.work, 0);
	} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
		/* the bssid we have is no longer fixed */
		mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
		/* tell the other code that this bssid should be used no matter what */
		mac->associnfo.bssfixed = 1;
		/* queue associate if new bssid or (old one again and not associated) */
-		schedule_work(&mac->associnfo.work);
+		schedule_delayed_work(&mac->associnfo.work, 0);
	}
 
 out:
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd805344c6..8c74f9168b7 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman);
 
 extern void twkill_slots_invalid(void);
 
-void inet_twdr_twkill_work(void *data)
+void inet_twdr_twkill_work(struct work_struct *work)
 {
-	struct inet_timewait_death_row *twdr = data;
+	struct inet_timewait_death_row *twdr =
+		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;
 
	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616e460..9b933381ebb 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@ static void update_defense_level(void)
 *	Timer for checking the defense
 */
 #define DEFENSE_TIMER_PERIOD	1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
 {
	update_defense_level();
	if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6dddf59c1fb..4a3889dd194 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = {
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &tcp_death_row),
+					     inet_twdr_twkill_work),
/* Short-time timewait calendar */
 
	.twcal_hand	= -1,
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a02030ad..262bda808d9 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty);
 static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
 static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
 static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
 static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
 
 static void ircomm_tty_stop(struct tty_struct *tty);
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
		self->flow = FLOW_STOP;
 
		self->line = line;
-		INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+		INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
		self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
		self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
		self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
 }
 
 /*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
 *
 *    We use this routine to give the write wakeup to the user at at a
 *    safe time (as fast as possible after write have completed). This
 *    can be compared to the Tx interrupt.
 */
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
 {
-	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+	struct ircomm_tty_cb *self =
+		container_of(work, struct ircomm_tty_cb, tqueue);
	struct tty_struct *tty;
	unsigned long flags;
	struct sk_buff *skb, *ctrl_skb;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 39471d3b31b..ad0057db0f9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
/* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
 
 
/* 1st Level Abstractions. */
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
-	sctp_inq_set_th_handler(&asoc->base.inqueue,
-				    (void (*)(void *))sctp_assoc_bh_rcv,
-				    asoc);
+	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
 
	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@ out:
 }
 
/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
 {
+	struct sctp_association *asoc =
+		container_of(work, struct sctp_association,
+			     base.inqueue.immediate);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 33a42e90c32..129756908da 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
/* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
 
/*
 * Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
	sctp_inq_init(&ep->base.inqueue);
 
	/* Set its top-half handler */
-	sctp_inq_set_th_handler(&ep->base.inqueue,
-				(void (*)(void *))sctp_endpoint_bh_rcv, ep);
+	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
 
	/* Initialize the bind addr area */
	sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
/* Do delayed input processing.  This is scheduled by sctp_rcv().
 * This may be called on BH or task time.
 */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
 {
+	struct sctp_endpoint *ep =
+		container_of(work, struct sctp_endpoint,
+			     base.inqueue.immediate);
	struct sctp_association *asoc;
	struct sock *sk;
	struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed7e84..71b07466e88 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue)
	queue->in_progress = NULL;
 
	/* Create a task for delivering data.  */
-	INIT_WORK(&queue->immediate, NULL, NULL);
+	INIT_WORK(&queue->immediate, NULL);
 
	queue->malloced = 0;
 }
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
	 * on the BH related data structures.
	 */
	list_add_tail(&chunk->list, &q->in_chunk_list);
-	q->immediate.func(q->immediate.data);
+	q->immediate.func(&q->immediate);
 }
 
/* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
-void sctp_inq_set_th_handler(struct sctp_inq *q,
-			     void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
 {
-	INIT_WORK(&q->immediate, callback, arg);
+	INIT_WORK(&q->immediate, callback);
 }
 
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388ece0..d96fd466a9a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations;
 static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
-static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static void do_cache_clean(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
	spin_unlock(&cache_list_lock);
 
	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@ static int cache_clean(void)
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
 {
	int delay = 5;
	if (cache_clean() == -1)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a97f9..49dba5febbb 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
 }
 
 static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
 {
	LIST_HEAD(free_list);
-	struct rpc_inode *rpci = (struct rpc_inode *)data;
+	struct rpc_inode *rpci =
+		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);
 
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
		INIT_LIST_HEAD(&rpci->pipe);
		rpci->pipelen = 0;
		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				  rpc_timeout_upcall_queue);
		rpci->ops = NULL;
	}
 }
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4eed41f..eff44bcdc95 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,7 +41,7 @@ static mempool_t	*rpc_buffer_mempool __read_mostly;
 
 static void			__rpc_default_timer(struct rpc_task *task);
 static void			rpciod_killall(void);
-static void			rpc_async_schedule(void *);
+static void			rpc_async_schedule(struct work_struct *);
 
/*
 * RPC tasks sit here while waiting for conditions to improve.
@@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task)
	if (RPC_IS_ASYNC(task)) {
		int status;
 
-		INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(task->tk_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task)
	return __rpc_execute(task);
 }
 
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
 {
-	__rpc_execute((struct rpc_task *)arg);
+	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
/**
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 80857470dc1..4f9a5d9791f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
	return status;
 }
 
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, task_cleanup);
 
	xprt_disconnect(xprt);
	xprt->ops->close(xprt);
@@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si
 
	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
-	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	init_timer(&xprt->timer);
	xprt->timer.function = xprt_init_autodisconnect;
	xprt->timer.data = (unsigned long) xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91ef25..cfe3c15be94 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1060,13 +1060,14 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
 
/**
 * xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, connect_worker.work);
	struct socket *sock = xprt->sock;
	int err, status = -EIO;
 
@@ -1144,13 +1145,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
 
/**
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, connect_worker.work);
	struct socket *sock = xprt->sock;
	int err, status = -EIO;
 
@@ -1262,7 +1264,7 @@ static void xs_connect(struct rpc_task *task)
		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
	} else {
		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&xprt->connect_worker, 0);
 
		/* flush_scheduled_work can sleep... */
		if (!RPC_IS_ASYNC(task))
@@ -1375,7 +1377,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker);
	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_UDP_CONN_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1420,7 +1422,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker);
	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_TCP_CONN_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64d3938f74c..f6c77bd36fd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
	xfrm_pol_put(policy);
 }
 
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
 {
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
	int dir, total;
 
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused)
	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void)
			panic("XFRM: failed to allocate bydst hash\n");
	}
 
-	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
 }
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 864962bbda9..da54a64ccfa 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
@@ -168,7 +168,7 @@ out_unlock:
	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
	kfree(x);
 }
 
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
 {
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
-	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 }