*/
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
- __sta_info_get(sta);
- if (mpath->next_hop)
- sta_info_put(mpath->next_hop);
- mpath->next_hop = sta;
+ rcu_assign_pointer(mpath->next_hop, sta);
}
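For reference, this is how a reader consumes an RCU-managed next_hop: dereference inside an RCU read-side critical section instead of holding a sta_info reference. A sketch only; the tx-path variables (hdr, dst, dev) are assumptions, not part of this patch:

    rcu_read_lock();
    mpath = mesh_path_lookup(dst, dev);
    if (mpath) {
        /* valid until rcu_read_unlock(), no refcount needed */
        struct sta_info *next_hop = rcu_dereference(mpath->next_hop);
        if (next_hop)
            memcpy(hdr->addr1, next_hop->addr, ETH_ALEN);
    }
    rcu_read_unlock();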
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
* @idx: index
- * @dev: local interface
+ * @dev: local interface, or NULL for all entries
*
* Returns: pointer to the mesh path structure, or NULL if not found.
*
* Locking: must be called within a read rcu section.
*/
int i;
int j = 0;
- for_each_mesh_entry(mesh_paths, p, node, i)
+ for_each_mesh_entry(mesh_paths, p, node, i) {
+ if (dev && node->mpath->dev != dev)
+ continue;
if (j++ == idx) {
if (MPATH_EXPIRED(node->mpath)) {
spin_lock_bh(&node->mpath->state_lock);
if (MPATH_EXPIRED(node->mpath))
node->mpath->flags &= MESH_PATH_ACTIVE;
spin_unlock_bh(&node->mpath->state_lock);
}
return node->mpath;
}
+ }
return NULL;
}
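A caller sketch for the new NULL-dev case (illustrative; counting paths is just an assumed use of the index lookup):

    /* dev == NULL now matches entries on every interface; the returned
     * mpath is only valid inside the RCU read-side section. */
    int idx = 0;

    rcu_read_lock();
    while (mesh_path_lookup_by_idx(idx, NULL) != NULL)
        idx++;              /* idx ends up as the total path count */
    rcu_read_unlock();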
if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
return -ENOSPC;
- read_lock(&pathtbl_resize_lock);
-
+ err = -ENOMEM;
new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
- if (!new_mpath) {
- atomic_dec(&sdata->u.sta.mpaths);
- err = -ENOMEM;
- goto endadd2;
- }
+ if (!new_mpath)
+ goto err_path_alloc;
+
+ new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+ if (!new_node)
+ goto err_node_alloc;
+
+ read_lock(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN);
new_mpath->dev = dev;
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
- new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
new_node->mpath = new_mpath;
new_mpath->timer.data = (unsigned long) new_mpath;
new_mpath->timer.function = mesh_path_timer;
spin_lock(&mesh_paths->hashwlock[hash_idx]);
+ err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
- if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
- == 0) {
- err = -EEXIST;
- atomic_dec(&sdata->u.sta.mpaths);
- kfree(new_node);
- kfree(new_mpath);
- goto endadd;
- }
+ if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+ goto err_exists;
}
hlist_add_head_rcu(&new_node->list, bucket);
if (atomic_inc_return(&mesh_paths->entries) >=
mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
grow = 1;
-endadd:
spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-endadd2:
read_unlock(&pathtbl_resize_lock);
- if (!err && grow) {
+ if (grow) {
struct mesh_table *oldtbl, *newtbl;
write_lock(&pathtbl_resize_lock);
oldtbl = mesh_paths;
newtbl = mesh_table_grow(mesh_paths);
if (!newtbl) {
write_unlock(&pathtbl_resize_lock);
- return -ENOMEM;
+ return 0;
}
rcu_assign_pointer(mesh_paths, newtbl);
+ write_unlock(&pathtbl_resize_lock);
+
synchronize_rcu();
mesh_table_free(oldtbl, false);
- write_unlock(&pathtbl_resize_lock);
}
+ return 0;
+
+err_exists:
+ spin_unlock(&mesh_paths->hashwlock[hash_idx]);
+ read_unlock(&pathtbl_resize_lock);
+ kfree(new_node);
+err_node_alloc:
+ kfree(new_mpath);
+err_path_alloc:
+ atomic_dec(&sdata->u.sta.mpaths);
return err;
}
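The reworked error handling is the usual kernel unwind idiom: do the sleeping allocations before taking pathtbl_resize_lock, set err before each failure point, and let each label undo exactly what had succeeded. A self-contained schematic of the pattern (not additional patch content):

    static int example_add(void)
    {
        void *path, *node;
        int err = -ENOMEM;

        path = kzalloc(64, GFP_KERNEL);
        if (!path)
            goto err_path_alloc;        /* nothing to undo yet */

        node = kmalloc(32, GFP_KERNEL);
        if (!node)
            goto err_node_alloc;        /* undo the first allocation */

        /* real code publishes path/node under locks here; the sketch
         * frees them so it stands alone */
        kfree(node);
        kfree(path);
        return 0;

    err_node_alloc:
        kfree(path);
    err_path_alloc:
        return err;
    }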
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
- struct net_device *dev = sta->dev;
+ struct net_device *dev = sta->sdata->dev;
int i;
rcu_read_lock();
*
* @sta - mesh peer to match
*
- * RCU notes: this function is called when a mesh plink transitions from ESTAB
- * to any other state, since ESTAB state is the only one that allows path
- * creation. This will happen before the sta can be freed (since we hold
- * a reference to it) so any reader in a rcu read block will be protected
- * against the plink dissapearing.
+ * RCU notes: this function is called when a mesh plink transitions from
+ * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
+ * allows path creation. This will happen before the sta can be freed (because
+ * sta_info_destroy() calls this), so any reader in an RCU read block will be
+ * protected against the plink disappearing.
*/
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
struct ieee80211_sub_if_data *sdata =
IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);
- if (node->mpath->next_hop)
- sta_info_put(node->mpath->next_hop);
+
+ del_timer_sync(&node->mpath->timer);
atomic_dec(&sdata->u.sta.mpaths);
kfree(node->mpath);
kfree(node);
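del_timer_sync() is what makes the kfree() below it safe: the timer is armed with the mpath itself as its argument (see mesh_path_add above), and a plain del_timer() would not wait for a handler already running on another CPU. An illustrative interleaving of the bug it prevents:

    /*  CPU0 (reclaim)                CPU1 (timer softirq)
     *  del_timer(&mpath->timer);
     *                                mesh_path_timer((unsigned long)mpath);
     *  kfree(mpath);                 ...handler still dereferencing mpath
     *
     * del_timer_sync() instead waits until the handler has finished. */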
* @dev: local interface
*
* Returns: 0 if successful
- *
- * State: if the path is being resolved, the deletion will be postponed until
- * the path resolution completes or times out.
*/
int mesh_path_del(u8 *addr, struct net_device *dev)
{
if (mpath->dev == dev &&
memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
spin_lock_bh(&mpath->state_lock);
- if (mpath->flags & MESH_PATH_RESOLVING) {
- mpath->flags |= MESH_PATH_DELETE;
- } else {
- mpath->flags |= MESH_PATH_RESOLVING;
- hlist_del_rcu(&node->list);
- call_rcu(&node->rcu, mesh_path_node_reclaim);
- atomic_dec(&mesh_paths->entries);
- }
+ mpath->flags |= MESH_PATH_RESOLVING;
+ hlist_del_rcu(&node->list);
+ call_rcu(&node->rcu, mesh_path_node_reclaim);
+ atomic_dec(&mesh_paths->entries);
spin_unlock_bh(&mpath->state_lock);
goto enddel;
}
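What the grace period buys here, as a sketch: a reader that raced with the unlink may still hold the node, and the deferred kfree in mesh_path_node_reclaim() guarantees this stays valid (addr and dev as in mesh_path_del):

    rcu_read_lock();
    mpath = mesh_path_lookup(addr, dev);    /* may still find the node */
    if (mpath)
        /* still a valid dereference: the free is deferred past the
         * end of this read-side section */
        skb_queue_len(&mpath->frame_queue);
    rcu_read_unlock();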
struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
mpath = node->mpath;
hlist_del_rcu(p);
- synchronize_rcu();
if (free_leafs)
kfree(mpath);
kfree(node);
}
-static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
+static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
struct mesh_path *mpath;
struct mpath_node *node, *new_node;
u32 hash_idx;
+ new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+ if (new_node == NULL)
+ return -ENOMEM;
+
node = hlist_entry(p, struct mpath_node, list);
mpath = node->mpath;
- new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
new_node->mpath = mpath;
hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
hlist_add_head(&new_node->list,
&newtbl->hash_buckets[hash_idx]);
+ return 0;
}
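With mesh_path_node_copy() now able to fail, its caller has to check the return and unwind; a sketch of the assumed copy loop inside mesh_table_grow() (that function is not part of this hunk):

    /* Stop on the first failed copy so mesh_table_grow() can free the
     * half-built table and return NULL, which mesh_path_add above now
     * treats as "keep using the old table". */
    for (i = 0; i <= oldtbl->hash_mask; i++)
        hlist_for_each(p, &oldtbl->hash_buckets[i])
            if (mesh_path_node_copy(p, newtbl) < 0)
                goto errcopy;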
int mesh_pathtbl_init(void)