From: Dongxiao Xu <dongxiao.xu@intel.com>
Subject: Netback: Generalize static/global variables into 'struct xen_netbk'.
Patch-mainline: Never, SUSE-Xen specific

 Put all the static/global variables in netback.c into the xen_netbk
 structure. This prepares for the support of multiple netback
 threads.

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>

jb: various cleanups
Acked-by: jbeulich@novell.com

--- a/drivers/xen/netback/common.h
+++ b/drivers/xen/netback/common.h
@@ -219,4 +219,65 @@ static inline int netbk_can_sg(struct ne
 	return netif->can_sg;
 }
 
+struct pending_tx_info {
+	netif_tx_request_t req;
+	grant_handle_t grant_handle;
+	netif_t *netif;
+};
+typedef unsigned int pending_ring_idx_t;
+
+struct netbk_rx_meta {
+	skb_frag_t frag;
+	u16 id;
+	u8 copy:2;
+	u8 tail:1;
+};
+
+struct netbk_tx_pending_inuse {
+	struct list_head list;
+	unsigned long alloc_time;
+};
+
+#define MAX_PENDING_REQS (1U << CONFIG_XEN_NETDEV_TX_SHIFT)
+#define MAX_MFN_ALLOC 64
+
+struct xen_netbk {
+	struct {
+		pending_ring_idx_t pending_prod, pending_cons;
+		pending_ring_idx_t dealloc_prod, dealloc_cons;
+		struct sk_buff_head queue;
+		struct tasklet_struct tasklet;
+		struct list_head schedule_list;
+		spinlock_t schedule_list_lock;
+		spinlock_t release_lock;
+		struct page **mmap_pages;
+		struct timer_list pending_timer;
+		struct list_head pending_inuse_head;
+		struct pending_tx_info pending_info[MAX_PENDING_REQS];
+		struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
+		u16 pending_ring[MAX_PENDING_REQS];
+		u16 dealloc_ring[MAX_PENDING_REQS];
+		union {
+			gnttab_map_grant_ref_t map_ops[MAX_PENDING_REQS];
+			gnttab_unmap_grant_ref_t unmap_ops[MAX_PENDING_REQS];
+			gnttab_copy_t copy_ops[2 * MAX_PENDING_REQS - 1];
+			multicall_entry_t mcl[0];
+		};
+		gnttab_copy_t copy_op;
+		netif_tx_request_t slots[XEN_NETIF_NR_SLOTS_MIN];
+	} tx;
+
+	struct xen_netbk_rx {
+		struct sk_buff_head queue;
+		struct tasklet_struct tasklet;
+		struct timer_list timer;
+		unsigned int alloc_index;
+		struct multicall_entry mcl[NET_RX_RING_SIZE+3];
+		struct mmu_update mmu[NET_RX_RING_SIZE];
+		struct gnttab_copy grant_copy_op[2 * NET_RX_RING_SIZE];
+		struct gnttab_transfer grant_trans_op;
+		struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+		unsigned long mfn_list[MAX_MFN_ALLOC];
+	} rx;
+};
 #endif /* __NETIF__BACKEND__COMMON_H__ */
--- a/drivers/xen/netback/netback.c
+++ b/drivers/xen/netback/netback.c
@@ -37,6 +37,7 @@
 #include "common.h"
 #include <linux/if_vlan.h>
 #include <linux/pfn.h>
+#include <linux/vmalloc.h>
 #include <net/tcp.h>
 #include <xen/balloon.h>
 #include <xen/evtchn.h>
@@ -46,12 +47,10 @@
 
 /*define NETBE_DEBUG_INTERRUPT*/
 
-struct netbk_rx_meta {
-	skb_frag_t frag;
-	u16 id;
-	u8 copy:2;
-	u8 tail:1;
-};
+static struct xen_netbk *__read_mostly xen_netbk;
+static const unsigned int netbk_nr_groups = 1;
+
+#define GET_GROUP_INDEX(netif) (0)
 
 struct netbk_rx_cb {
 	unsigned int nr_frags;
@@ -65,12 +64,7 @@ struct netbk_tx_cb {
 };
 #define netbk_tx_cb(skb) ((struct netbk_tx_cb *)skb->cb)
 
-struct netbk_tx_pending_inuse {
-	struct list_head list;
-	unsigned long alloc_time;
-};
-
-static void netif_idx_release(u16 pending_idx);
+static void netif_idx_release(struct xen_netbk *, u16 pending_idx);
 static bool make_tx_response(netif_t *, const netif_tx_request_t *, s8 st,
 			     netif_t **);
 static netif_rx_response_t *make_rx_response(netif_t *netif, 
@@ -80,52 +74,58 @@ static netif_rx_response_t *make_rx_resp
 					     u16      size,
 					     u16      flags);
 
-static void net_tx_action(unsigned long unused);
-static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-
-static void net_rx_action(unsigned long unused);
-static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
-
-static struct timer_list net_timer;
-static struct timer_list netbk_tx_pending_timer;
-
-#define MAX_PENDING_REQS (1U << CONFIG_XEN_NETDEV_TX_SHIFT)
+static void net_tx_action(unsigned long group);
+static void net_rx_action(unsigned long group);
 
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xffff
 
-static struct sk_buff_head rx_queue;
-
-static struct page **mmap_pages;
-static inline unsigned long idx_to_pfn(u16 idx)
+static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, u16 idx)
 {
-	return page_to_pfn(mmap_pages[idx]);
+	return page_to_pfn(netbk->tx.mmap_pages[idx]);
 }
 
-static inline unsigned long idx_to_kaddr(u16 idx)
+static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, u16 idx)
 {
-	return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
+	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
 }
 
 /* extra field used in struct page */
-static inline void netif_set_page_index(struct page *pg, unsigned int index)
-{
-	*(unsigned long *)&pg->mapping = index;
-}
+union page_ext {
+	struct {
+#if BITS_PER_LONG < 64
+#define GROUP_WIDTH (BITS_PER_LONG - CONFIG_XEN_NETDEV_TX_SHIFT)
+#define MAX_GROUPS ((1U << GROUP_WIDTH) - 1)
+		unsigned int grp:GROUP_WIDTH;
+		unsigned int idx:CONFIG_XEN_NETDEV_TX_SHIFT;
+#else
+#define MAX_GROUPS UINT_MAX
+		unsigned int grp, idx;
+#endif
+	} e;
+	void *mapping;
+};
 
-static inline int netif_page_index(struct page *pg)
+static inline void netif_set_page_ext(struct page *pg, unsigned int group,
+				      unsigned int idx)
 {
-	unsigned long idx = (unsigned long)pg->mapping;
+	union page_ext ext = { .e = { .grp = group + 1, .idx = idx } };
 
-	if (!PageForeign(pg))
-		return -1;
-
-	if ((idx >= MAX_PENDING_REQS) || (mmap_pages[idx] != pg))
-		return -1;
-
-	return idx;
+	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
+	pg->mapping = ext.mapping;
 }
 
+#define netif_get_page_ext(pg, netbk, index) do { \
+	struct page *pg__ = (pg); \
+	union page_ext ext__ = { .mapping = pg__->mapping }; \
+	unsigned int grp__ = ext__.e.grp - 1; \
+	unsigned int idx__ = index = ext__.e.idx; \
+	netbk = grp__ < netbk_nr_groups ? &xen_netbk[grp__] : NULL; \
+	if (!PageForeign(pg__) || idx__ >= MAX_PENDING_REQS || \
+	    (netbk && netbk->tx.mmap_pages[idx__] != pg__)) \
+		netbk = NULL; \
+} while (0)
+
 static u16 frag_get_pending_idx(const skb_frag_t *frag)
 {
 	return (u16)frag->page_offset;
@@ -145,41 +145,13 @@ static void frag_set_pending_idx(skb_fra
 			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
 			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
 
-static struct pending_tx_info {
-	netif_tx_request_t req;
-	netif_t *netif;
-} pending_tx_info[MAX_PENDING_REQS];
-static u16 pending_ring[MAX_PENDING_REQS];
-typedef unsigned int PEND_RING_IDX;
 #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-static PEND_RING_IDX pending_prod, pending_cons;
-#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
 
-/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-static u16 dealloc_ring[MAX_PENDING_REQS];
-static PEND_RING_IDX dealloc_prod, dealloc_cons;
-
-/* Doubly-linked list of in-use pending entries. */
-static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
-static LIST_HEAD(pending_inuse_head);
-
-static struct sk_buff_head tx_queue;
-
-static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-static union {
-	gnttab_map_grant_ref_t map_ops[MAX_PENDING_REQS];
-	gnttab_unmap_grant_ref_t unmap_ops[MAX_PENDING_REQS];
-	gnttab_copy_t copy_ops[2 * MAX_PENDING_REQS];
-	multicall_entry_t mcl[0];
-} tx;
-static netif_tx_request_t tx_slots[XEN_NETIF_NR_SLOTS_MIN];
-
-static struct list_head net_schedule_list;
-static spinlock_t net_schedule_list_lock;
-
-#define MAX_MFN_ALLOC 64
-static unsigned long mfn_list[MAX_MFN_ALLOC];
-static unsigned int alloc_index = 0;
+static inline pending_ring_idx_t nr_pending_reqs(const struct xen_netbk *netbk)
+{
+	return MAX_PENDING_REQS -
+		netbk->tx.pending_prod + netbk->tx.pending_cons;
+}
 
 /*
  * This is the maximum slots a TX request can have. If a guest sends a TX
@@ -199,13 +171,13 @@ MODULE_PARM_DESC(permute_returns, "Rando
 
 int netbk_copy_skb_mode;
 
-static inline unsigned long alloc_mfn(void)
+static inline unsigned long alloc_mfn(struct xen_netbk_rx *netbk)
 {
-	BUG_ON(alloc_index == 0);
-	return mfn_list[--alloc_index];
+	BUG_ON(netbk->alloc_index == 0);
+	return netbk->mfn_list[--netbk->alloc_index];
 }
 
-static int check_mfn(int nr)
+static int check_mfn(struct xen_netbk_rx *netbk, unsigned int nr)
 {
 	struct xen_memory_reservation reservation = {
 		.extent_order = 0,
@@ -213,16 +185,17 @@ static int check_mfn(int nr)
 	};
 	int rc;
 
-	if (likely(alloc_index >= nr))
+	if (likely(netbk->alloc_index >= nr))
 		return 0;
 
-	set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
-	reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
+	set_xen_guest_handle(reservation.extent_start,
+			     netbk->mfn_list + netbk->alloc_index);
+	reservation.nr_extents = MAX_MFN_ALLOC - netbk->alloc_index;
 	rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
 	if (likely(rc > 0))
-		alloc_index += rc;
+		netbk->alloc_index += rc;
 
-	return alloc_index >= nr ? 0 : -ENOMEM;
+	return netbk->alloc_index >= nr ? 0 : -ENOMEM;
 }
 
 static void flush_notify_list(netif_t *list, unsigned int idx,
@@ -251,12 +224,14 @@ static void flush_notify_list(netif_t *l
 		BUG();
 }
 
-static inline void maybe_schedule_tx_action(void)
+static inline void maybe_schedule_tx_action(netif_t *netif)
 {
+	struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)];
+
 	smp_mb();
-	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-	    !list_empty(&net_schedule_list))
-		tasklet_schedule(&net_tx_tasklet);
+	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
+	    !list_empty(&netbk->tx.schedule_list))
+		tasklet_schedule(&netbk->tx.tasklet);
 }
 
 static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
@@ -382,6 +357,7 @@ static unsigned int netbk_count_slots(co
 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	netif_t *netif = netdev_priv(dev);
+	struct xen_netbk_rx *netbk;
 
 	BUG_ON(skb->dev != dev);
 
@@ -433,8 +409,9 @@ int netif_be_start_xmit(struct sk_buff *
 		}
 	}
 
-	skb_queue_tail(&rx_queue, skb);
-	tasklet_schedule(&net_rx_tasklet);
+	netbk = &xen_netbk[GET_GROUP_INDEX(netif)].rx;
+	skb_queue_tail(&netbk->queue, skb);
+	tasklet_schedule(&netbk->tasklet);
 
 	return NETDEV_TX_OK;
 
@@ -459,7 +436,7 @@ static void xen_network_done_notify(void
  */
 int xen_network_done(void)
 {
-	return skb_queue_empty(&rx_queue);
+	return skb_queue_empty(&netbk->rx.queue);
 }
 #endif
 
@@ -489,26 +466,28 @@ static void netbk_gop_frag(netif_t *neti
 	gnttab_copy_t *copy_gop;
 	multicall_entry_t *mcl;
 	netif_rx_request_t *req;
-	unsigned long old_mfn, new_mfn;
-	int idx = netif_page_index(page);
-
-	old_mfn = virt_to_mfn(page_address(page));
 
 	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
 	if (netif->copying_receiver) {
+		struct xen_netbk *netbk;
+		unsigned int idx;
+
 		/* The fragment needs to be copied rather than
 		   flipped. */
 		meta->copy++;
 		copy_gop = npo->copy + npo->copy_prod++;
 		copy_gop->flags = GNTCOPY_dest_gref;
-		if (idx > -1) {
-			struct pending_tx_info *src_pend = &pending_tx_info[idx];
+		netif_get_page_ext(page, netbk, idx);
+		if (netbk) {
+			struct pending_tx_info *src_pend;
+
+			src_pend = &netbk->tx.pending_info[idx];
 			copy_gop->source.domid = src_pend->netif->domid;
 			copy_gop->source.u.ref = src_pend->req.gref;
 			copy_gop->flags |= GNTCOPY_source_gref;
 		} else {
 			copy_gop->source.domid = DOMID_SELF;
-			copy_gop->source.u.gmfn = old_mfn;
+			copy_gop->source.u.gmfn = pfn_to_mfn(page_to_pfn(page));
 		}
 		copy_gop->source.offset = offset;
 		copy_gop->dest.domid = netif->domid;
@@ -516,8 +495,14 @@ static void netbk_gop_frag(netif_t *neti
 		copy_gop->dest.u.ref = req->gref;
 		copy_gop->len = size;
 	} else {
+		gop = npo->trans - npo->trans_prod++;
+		gop->mfn = pfn_to_mfn(page_to_pfn(page));
+		gop->domid = netif->domid;
+		gop->ref = req->gref;
+
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			new_mfn = alloc_mfn();
+			unsigned long new_mfn =
+				alloc_mfn(&xen_netbk[GET_GROUP_INDEX(netif)].rx);
 
 			/*
 			 * Set the new P2M table entry before
@@ -537,11 +522,6 @@ static void netbk_gop_frag(netif_t *neti
 				MMU_MACHPHYS_UPDATE;
 			mmu->val = page_to_pfn(page);
 		}
-
-		gop = npo->trans + npo->trans_prod++;
-		gop->mfn = old_mfn;
-		gop->domid = netif->domid;
-		gop->ref = req->gref;
 	}
 	meta->id = req->id;
 }
@@ -672,7 +652,7 @@ static int netbk_check_gop(unsigned int
 				BUG_ON(mcl->result != 0);
 			}
 
-			gop = npo->trans + npo->trans_cons++;
+			gop = npo->trans - npo->trans_cons++;
 			/* Check the reassignment error code. */
 			if (unlikely(gop->status != GNTST_okay)) {
 				netdev_dbg(netif->dev,
@@ -710,7 +690,7 @@ static unsigned int netbk_add_frag_respo
 	return n;
 }
 
-static void net_rx_action(unsigned long unused)
+static void net_rx_action(unsigned long group)
 {
 	netif_t *netif, *notify_head = NULL, *notify_tail = NULL;
 	s8 status;
@@ -723,23 +703,15 @@ static void net_rx_action(unsigned long
 	int nr_frags;
 	int count;
 	unsigned long offset;
-
-	/*
-	 * Putting hundreds of bytes on the stack is considered rude.
-	 * Static works because a tasklet can only be on one CPU at any time.
-	 */
-	static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
-	static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-	static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
-	static gnttab_copy_t grant_copy_op[2 * NET_RX_RING_SIZE];
-	static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+	struct xen_netbk_rx *netbk = &xen_netbk[group].rx;
 
 	struct netrx_pending_operations npo = {
-		mmu: rx_mmu,
-		trans: grant_trans_op,
-		copy: grant_copy_op,
-		mcl: rx_mcl,
-		meta: meta};
+		.mmu   = netbk->mmu,
+		.trans = &netbk->grant_trans_op,
+		.copy  = netbk->grant_copy_op,
+		.mcl   = netbk->mcl,
+		.meta  = netbk->meta,
+	};
 
 	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct netbk_rx_cb));
 
@@ -747,23 +719,23 @@ static void net_rx_action(unsigned long
 
 	count = 0;
 
-	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
+	while ((skb = skb_dequeue(&netbk->queue)) != NULL) {
 		nr_frags = netbk_rx_cb(skb)->nr_slots;
 
 		/* Filled the batch queue? */
 		if (count + nr_frags > NET_RX_RING_SIZE) {
-			skb_queue_head(&rx_queue, skb);
+			skb_queue_head(&netbk->queue, skb);
 			break;
 		}
 
 		if (!xen_feature(XENFEAT_auto_translated_physmap) &&
 		    !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
-		    check_mfn(nr_frags)) {
+		    check_mfn(netbk, nr_frags)) {
 			/* Memory squeeze? Back off for an arbitrary while. */
 			if ( net_ratelimit() )
 				netdev_warn(skb->dev, "memory squeeze\n");
-			mod_timer(&net_timer, jiffies + HZ);
-			skb_queue_head(&rx_queue, skb);
+			mod_timer(&netbk->timer, jiffies + HZ);
+			skb_queue_head(&netbk->queue, skb);
 			break;
 		}
 
@@ -772,39 +744,40 @@ static void net_rx_action(unsigned long
 		__skb_queue_tail(&rxq, skb);
 	}
 
-	BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
+	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
 
 	npo.mmu_mcl = npo.mcl_prod;
 	if (npo.mcl_prod) {
 		BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-		BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
+		BUG_ON(npo.mmu_prod > ARRAY_SIZE(netbk->mmu));
 		mcl = npo.mcl + npo.mcl_prod++;
 
 		BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
 		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
 
-		MULTI_mmu_update(mcl, rx_mmu, npo.mmu_prod, 0, DOMID_SELF);
+		MULTI_mmu_update(mcl, netbk->mmu, npo.mmu_prod, 0,
+				 DOMID_SELF);
 	}
 
-	if (npo.trans_prod) {
-		BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
+	BUILD_BUG_ON(sizeof(netbk->grant_trans_op)
+		     > sizeof(*netbk->grant_copy_op));
+	BUG_ON(npo.copy_prod + npo.trans_prod
+	       > ARRAY_SIZE(netbk->grant_copy_op) + 1);
+	if (npo.trans_prod)
 		MULTI_grant_table_op(npo.mcl + npo.mcl_prod++,
-				     GNTTABOP_transfer, grant_trans_op,
+				     GNTTABOP_transfer,
+				     npo.trans + 1 - npo.trans_prod,
 				     npo.trans_prod);
-	}
 
-	if (npo.copy_prod) {
-		BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
+	if (npo.copy_prod)
 		MULTI_grant_table_op(npo.mcl + npo.mcl_prod++,
-				     GNTTABOP_copy, grant_copy_op,
-				     npo.copy_prod);
-	}
+				     GNTTABOP_copy, npo.copy, npo.copy_prod);
 
 	/* Nothing to do? */
 	if (!npo.mcl_prod)
 		return;
 
-	BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
+	BUG_ON(npo.mcl_prod > ARRAY_SIZE(netbk->mcl));
 
 	ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
 	BUG_ON(ret != 0);
@@ -831,13 +804,14 @@ static void net_rx_action(unsigned long
 			skb_shinfo(skb)->nr_frags = 0;
 			skb_shinfo(skb)->tx_flags = 0;
 			skb_frag_list_init(skb);
-			netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
+			netbk_free_pages(nr_frags,
+					 netbk->meta + npo.meta_cons + 1);
 		}
 
 		skb->dev->stats.tx_bytes += skb->len;
 		skb->dev->stats.tx_packets++;
 
-		id = meta[npo.meta_cons].id;
+		id = netbk->meta[npo.meta_cons].id;
 		flags = nr_frags ? XEN_NETRXF_more_data : 0;
 
 		switch (skb->ip_summed) {
@@ -850,14 +824,14 @@ static void net_rx_action(unsigned long
 			break;
 		}
 
-		if (meta[npo.meta_cons].copy)
+		if (netbk->meta[npo.meta_cons].copy)
 			offset = 0;
 		else
 			offset = offset_in_page(skb->data);
 		resp = make_rx_response(netif, id, status, offset,
 					skb_headlen(skb), flags);
 
-		if (meta[npo.meta_cons].frag.size) {
+		if (netbk->meta[npo.meta_cons].frag.size) {
 			struct netif_extra_info *gso =
 				(struct netif_extra_info *)
 				RING_GET_RESPONSE(&netif->rx,
@@ -865,7 +839,7 @@ static void net_rx_action(unsigned long
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
-			gso->u.gso.size = meta[npo.meta_cons].frag.size;
+			gso->u.gso.size = netbk->meta[npo.meta_cons].frag.size;
 			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
@@ -875,7 +849,7 @@ static void net_rx_action(unsigned long
 		}
 
 		nr_frags = netbk_add_frag_responses(netif, status,
-						    meta + npo.meta_cons + 1,
+						    netbk->meta + npo.meta_cons + 1,
 						    nr_frags);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
@@ -900,26 +874,27 @@ static void net_rx_action(unsigned long
 	}
 
 	if (notify_head)
-		flush_notify_list(notify_head, RX_IDX, rx_mcl,
-				  ARRAY_SIZE(rx_mcl));
+		flush_notify_list(notify_head, RX_IDX, netbk->mcl,
+				  ARRAY_SIZE(netbk->mcl));
 
 	/* More work to do? */
-	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
-		tasklet_schedule(&net_rx_tasklet);
+	if (!skb_queue_empty(&netbk->queue) &&
+	    !timer_pending(&netbk->timer))
+		tasklet_schedule(&netbk->tasklet);
 #if 0
 	else
 		xen_network_done_notify();
 #endif
 }
 
-static void net_alarm(unsigned long unused)
+static void net_alarm(unsigned long group)
 {
-	tasklet_schedule(&net_rx_tasklet);
+	tasklet_schedule(&xen_netbk[group].rx.tasklet);
 }
 
-static void netbk_tx_pending_timeout(unsigned long unused)
+static void netbk_tx_pending_timeout(unsigned long group)
 {
-	tasklet_schedule(&net_tx_tasklet);
+	tasklet_schedule(&xen_netbk[group].tx.tasklet);
 }
 
 static int __on_net_schedule_list(netif_t *netif)
@@ -927,7 +902,7 @@ static int __on_net_schedule_list(netif_
 	return netif->list.next != NULL;
 }
 
-/* Must be called with net_schedule_list_lock held. */
+/* Must be called with netbk->tx.schedule_list_lock held. */
 static void remove_from_net_schedule_list(netif_t *netif)
 {
 	if (likely(__on_net_schedule_list(netif))) {
@@ -937,34 +912,36 @@ static void remove_from_net_schedule_lis
 	}
 }
 
-static netif_t *poll_net_schedule_list(void)
+static netif_t *poll_net_schedule_list(struct xen_netbk *netbk)
 {
 	netif_t *netif = NULL;
 
-	spin_lock_irq(&net_schedule_list_lock);
-	if (!list_empty(&net_schedule_list)) {
-		netif = list_first_entry(&net_schedule_list, netif_t, list);
+	spin_lock_irq(&netbk->tx.schedule_list_lock);
+	if (!list_empty(&netbk->tx.schedule_list)) {
+		netif = list_first_entry(&netbk->tx.schedule_list,
+					 netif_t, list);
 		netif_get(netif);
 		remove_from_net_schedule_list(netif);
 	}
-	spin_unlock_irq(&net_schedule_list_lock);
+	spin_unlock_irq(&netbk->tx.schedule_list_lock);
 	return netif;
 }
 
 static void add_to_net_schedule_list_tail(netif_t *netif)
 {
+	struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)];
 	unsigned long flags;
 
 	if (__on_net_schedule_list(netif))
 		return;
 
-	spin_lock_irqsave(&net_schedule_list_lock, flags);
+	spin_lock_irqsave(&netbk->tx.schedule_list_lock, flags);
 	if (!__on_net_schedule_list(netif) &&
 	    likely(netif_schedulable(netif))) {
-		list_add_tail(&netif->list, &net_schedule_list);
+		list_add_tail(&netif->list, &netbk->tx.schedule_list);
 		netif_get(netif);
 	}
-	spin_unlock_irqrestore(&net_schedule_list_lock, flags);
+	spin_unlock_irqrestore(&netbk->tx.schedule_list_lock, flags);
 }
 
 /*
@@ -987,15 +964,17 @@ void netif_schedule_work(netif_t *netif)
 
 	if (more_to_do && likely(!netif->busted)) {
 		add_to_net_schedule_list_tail(netif);
-		maybe_schedule_tx_action();
+		maybe_schedule_tx_action(netif);
 	}
 }
 
 void netif_deschedule_work(netif_t *netif)
 {
-	spin_lock_irq(&net_schedule_list_lock);
+	struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)];
+
+	spin_lock_irq(&netbk->tx.schedule_list_lock);
 	remove_from_net_schedule_list(netif);
-	spin_unlock_irq(&net_schedule_list_lock);
+	spin_unlock_irq(&netbk->tx.schedule_list_lock);
 }
 
 
@@ -1024,17 +1003,19 @@ static void tx_credit_callback(unsigned
 	netif_schedule_work(netif);
 }
 
-static inline int copy_pending_req(PEND_RING_IDX pending_idx)
+static inline int copy_pending_req(struct xen_netbk *netbk,
+				   pending_ring_idx_t pending_idx)
 {
-	return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
-				      &mmap_pages[pending_idx]);
+	return gnttab_copy_grant_page(netbk->tx.pending_info[pending_idx].grant_handle,
+				      &netbk->tx.mmap_pages[pending_idx]);
 }
 
-static void permute_dealloc_ring(PEND_RING_IDX dc, PEND_RING_IDX dp)
+static void permute_dealloc_ring(u16 *dealloc_ring, pending_ring_idx_t dc,
+				 pending_ring_idx_t dp)
 {
 	static unsigned random_src = 0x12345678;
 	unsigned dst_offset;
-	PEND_RING_IDX dest;
+	pending_ring_idx_t dest;
 	u16 tmp;
 
 	while (dc != dp) {
@@ -1049,67 +1030,71 @@ static void permute_dealloc_ring(PEND_RI
 	}
 }
 
-inline static void net_tx_action_dealloc(void)
+static inline void net_tx_action_dealloc(struct xen_netbk *netbk)
 {
+	struct pending_tx_info *pending_tx_info = netbk->tx.pending_info;
 	struct netbk_tx_pending_inuse *inuse, *n;
 	gnttab_unmap_grant_ref_t *gop;
 	u16 pending_idx;
-	PEND_RING_IDX dc, dp;
+	pending_ring_idx_t dc, dp;
 	netif_t *netif, *notify_head = NULL, *notify_tail = NULL;
 	LIST_HEAD(list);
 
-	dc = dealloc_cons;
-	gop = tx.unmap_ops;
+	dc = netbk->tx.dealloc_cons;
+	gop = netbk->tx.unmap_ops;
 
 	/*
 	 * Free up any grants we have finished using
 	 */
 	do {
-		dp = dealloc_prod;
+		dp = netbk->tx.dealloc_prod;
 
 		/* Ensure we see all indices enqueued by netif_idx_release(). */
 		smp_rmb();
 
 		if (MODPARM_permute_returns)
-			permute_dealloc_ring(dc, dp);
+			permute_dealloc_ring(netbk->tx.dealloc_ring, dc, dp);
 
 		while (dc != dp) {
 			unsigned long pfn;
+			struct netbk_tx_pending_inuse *pending_inuse =
+					netbk->tx.pending_inuse;
 
-			pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+			pending_idx = netbk->tx.dealloc_ring[MASK_PEND_IDX(dc++)];
 			list_move_tail(&pending_inuse[pending_idx].list, &list);
 
-			pfn = idx_to_pfn(pending_idx);
+			pfn = idx_to_pfn(netbk, pending_idx);
 			/* Already unmapped? */
 			if (!phys_to_machine_mapping_valid(pfn))
 				continue;
 
-			gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
+			gnttab_set_unmap_op(gop, idx_to_kaddr(netbk, pending_idx),
 					    GNTMAP_host_map,
-					    grant_tx_handle[pending_idx]);
+					    netbk->tx.pending_info[pending_idx].grant_handle);
 			gop++;
 		}
 
-	} while (dp != dealloc_prod);
+	} while (dp != netbk->tx.dealloc_prod);
 
-	dealloc_cons = dc;
+	netbk->tx.dealloc_cons = dc;
 
 	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
-				      tx.unmap_ops, gop - tx.unmap_ops))
+				      netbk->tx.unmap_ops,
+				      gop - netbk->tx.unmap_ops))
 		BUG();
 
 	/* Copy any entries that have been pending for too long. */
 	if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
-	    !list_empty(&pending_inuse_head)) {
-		list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
+	    !list_empty(&netbk->tx.pending_inuse_head)) {
+		list_for_each_entry_safe(inuse, n, &netbk->tx.pending_inuse_head, list) {
 			if (time_after(inuse->alloc_time + HZ / 2, jiffies))
 				break;
 
-			pending_idx = inuse - pending_inuse;
+			pending_idx = inuse - netbk->tx.pending_inuse;
 
 			pending_tx_info[pending_idx].netif->nr_copied_skbs++;
 
-			switch (copy_pending_req(pending_idx)) {
+			switch (copy_pending_req(netbk, pending_idx)) {
 			case 0:
 				list_move_tail(&inuse->list, &list);
 				continue;
@@ -1125,8 +1110,7 @@ inline static void net_tx_action_dealloc
 	}
 
 	list_for_each_entry_safe(inuse, n, &list, list) {
-		pending_idx = inuse - pending_inuse;
-
+		pending_idx = inuse - netbk->tx.pending_inuse;
 		netif = pending_tx_info[pending_idx].netif;
 
 		if (!make_tx_response(netif, &pending_tx_info[pending_idx].req,
@@ -1136,15 +1120,17 @@ inline static void net_tx_action_dealloc
 			notify_head = netif;
 
 		/* Ready for next use. */
-		gnttab_reset_grant_page(mmap_pages[pending_idx]);
+		gnttab_reset_grant_page(netbk->tx.mmap_pages[pending_idx]);
 
-		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+		netbk->tx.pending_ring[MASK_PEND_IDX(netbk->tx.pending_prod++)] =
+			pending_idx;
 
 		list_del_init(&inuse->list);
 	}
 	if (notify_head)
-		flush_notify_list(notify_head, TX_IDX, tx.mcl,
-				  sizeof(tx) / sizeof(tx.mcl[0]));
+		flush_notify_list(notify_head, TX_IDX, netbk->tx.mcl,
+				  sizeof(netbk->tx.map_ops)
+				  / sizeof(*netbk->tx.mcl));
 }
 
 static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
@@ -1298,19 +1284,23 @@ static void netbk_fill_tx_copy(const net
 	gop->ptr += gop->copy->len;
 }
 
-void netbk_get_requests(netif_t *netif, struct sk_buff *skb,
-			netif_tx_request_t *txp, struct netbk_tx_gop *gop)
+void netbk_get_requests(struct xen_netbk *netbk, netif_t *netif,
+			struct sk_buff *skb, struct netbk_tx_gop *gop)
 {
+	netif_tx_request_t *txp = netbk->tx.slots;
+	struct pending_tx_info *pending_tx_info = netbk->tx.pending_info;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = netbk_tx_cb(skb)->pending_idx[0];
+	pending_ring_idx_t index;
 	int i, start;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(frags) == pending_idx);
 
 	for (i = 0; i < netbk_tx_cb(skb)->copy_slots; ++i, txp++) {
-		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
+		index = MASK_PEND_IDX(netbk->tx.pending_cons++);
+		pending_idx = netbk->tx.pending_ring[index];
 
 		netbk_fill_tx_copy(txp, gop, netif->domid);
 
@@ -1321,9 +1311,10 @@ void netbk_get_requests(netif_t *netif,
 	}
 
 	for (i = start; i < shinfo->nr_frags; i++, txp++) {
-		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
+		index = MASK_PEND_IDX(netbk->tx.pending_cons++);
+		pending_idx = netbk->tx.pending_ring[index];
 
-		gnttab_set_map_op(gop->map++, idx_to_kaddr(pending_idx),
+		gnttab_set_map_op(gop->map++, idx_to_kaddr(netbk, pending_idx),
 				  GNTMAP_host_map | GNTMAP_readonly,
 				  txp->gref, netif->domid);
 
@@ -1339,14 +1330,16 @@ void netbk_get_requests(netif_t *netif,
 				     gop->map, gop->copy);
 }
 
-static int netbk_tx_check_gop(struct sk_buff *skb,
+static int netbk_tx_check_gop(struct xen_netbk *netbk, struct sk_buff *skb,
 			      struct netbk_tx_gop *gop, bool hdr_copied)
 {
 	gnttab_copy_t *cop = gop->copy;
 	gnttab_map_grant_ref_t *mop = gop->map;
 	u16 pending_idx = netbk_tx_cb(skb)->pending_idx[0];
+	pending_ring_idx_t index;
+	struct pending_tx_info *pending_tx_info = netbk->tx.pending_info;
 	netif_t *netif = pending_tx_info[pending_idx].netif;
-	netif_tx_request_t *txp;
+	netif_tx_request_t *txp = &pending_tx_info[pending_idx].req;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
@@ -1354,7 +1347,6 @@ static int netbk_tx_check_gop(struct sk_
 	/* Check status of header. */
 	if (hdr_copied) {
 		err = (--cop)->status;
-		txp = &pending_tx_info[pending_idx].req;
 		if (txp->size > cop->len)
 			cmpxchg_local(&err, GNTST_okay, (--cop)->status);
 		if (!make_tx_response(netif, txp,
@@ -1364,17 +1356,19 @@ static int netbk_tx_check_gop(struct sk_
 			netif_put(netif);
 		else if (!gop->notify.head)
 			gop->notify.head = netif;
-		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+		index = MASK_PEND_IDX(netbk->tx.pending_prod++);
+		netbk->tx.pending_ring[index] = pending_idx;
 	} else if (unlikely((err = mop->status) != GNTST_okay)) {
 		++mop;
-		txp = &pending_tx_info[pending_idx].req;
 		make_tx_response(netif, txp, XEN_NETIF_RSP_ERROR, NULL);
-		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+		index = MASK_PEND_IDX(netbk->tx.pending_prod++);
+		netbk->tx.pending_ring[index] = pending_idx;
 		netif_put(netif);
 	} else {
-		set_phys_to_machine(idx_to_pfn(pending_idx),
+		set_phys_to_machine(idx_to_pfn(netbk, pending_idx),
 			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-		grant_tx_handle[pending_idx] = mop++->handle;
+		container_of(txp, struct pending_tx_info, req)->grant_handle
+			= mop++->handle;
 	}
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
@@ -1395,30 +1389,33 @@ static int netbk_tx_check_gop(struct sk_
 		else if (!gop->notify.head)
 			gop->notify.head = netif;
 		cmpxchg_local(&err, GNTST_okay, newerr);
-		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+		index = MASK_PEND_IDX(netbk->tx.pending_prod++);
+		netbk->tx.pending_ring[index] = pending_idx;
 	}
 
 	for (i = start; i < nr_frags; i++, mop++) {
 		int j, newerr;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+		txp = &pending_tx_info[pending_idx].req;
 
 		/* Check error status: if okay then remember grant handle. */
 		newerr = mop->status;
 		if (likely(newerr == GNTST_okay)) {
-			set_phys_to_machine(idx_to_pfn(pending_idx),
+			set_phys_to_machine(idx_to_pfn(netbk, pending_idx),
 				FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
-			grant_tx_handle[pending_idx] = mop->handle;
+			container_of(txp, struct pending_tx_info, req)->grant_handle
+				= mop->handle;
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err != GNTST_okay))
-				netif_idx_release(pending_idx);
+				netif_idx_release(netbk, pending_idx);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		txp = &pending_tx_info[pending_idx].req;
 		make_tx_response(netif, txp, XEN_NETIF_RSP_ERROR, NULL);
-		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+		index = MASK_PEND_IDX(netbk->tx.pending_prod++);
+		netbk->tx.pending_ring[index] = pending_idx;
 		netif_put(netif);
 
 		/* Not the first error? Preceding frags already invalidated. */
@@ -1428,11 +1425,11 @@ static int netbk_tx_check_gop(struct sk_
 		/* First error: invalidate header and preceding fragments. */
 		if (!hdr_copied) {
 			pending_idx = netbk_tx_cb(skb)->pending_idx[0];
-			netif_idx_release(pending_idx);
+			netif_idx_release(netbk, pending_idx);
 		}
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			netif_idx_release(pending_idx);
+			netif_idx_release(netbk, pending_idx);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
@@ -1447,7 +1444,7 @@ static int netbk_tx_check_gop(struct sk_
 	return err;
 }
 
-static void netbk_fill_frags(struct sk_buff *skb)
+static void netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
@@ -1457,12 +1454,12 @@ static void netbk_fill_frags(struct sk_b
 		netif_tx_request_t *txp;
 		u16 pending_idx = frag_get_pending_idx(shinfo->frags + i);
 
-		pending_inuse[pending_idx].alloc_time = jiffies;
-		list_add_tail(&pending_inuse[pending_idx].list,
-			      &pending_inuse_head);
+		netbk->tx.pending_inuse[pending_idx].alloc_time = jiffies;
+		list_add_tail(&netbk->tx.pending_inuse[pending_idx].list,
+			      &netbk->tx.pending_inuse_head);
 
-		txp = &pending_tx_info[pending_idx].req;
-		__skb_fill_page_desc(skb, i, mmap_pages[pending_idx],
+		txp = &netbk->tx.pending_info[pending_idx].req;
+		__skb_fill_page_desc(skb, i, netbk->tx.mmap_pages[pending_idx],
 				     txp->offset, txp->size);
 
 		skb->len += txp->size;
@@ -1530,8 +1527,9 @@ static int netbk_set_skb_gso(netif_t *ne
 }
 
 /* Called after netfront has transmitted */
-static void net_tx_action(unsigned long unused)
+static void net_tx_action(unsigned long group)
 {
+	struct xen_netbk *netbk = &xen_netbk[group];
 	struct sk_buff *skb;
 	netif_t *netif;
 	netif_tx_request_t txreq, *txslot;
@@ -1545,14 +1543,14 @@ static void net_tx_action(unsigned long
 
 	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct netbk_tx_cb));
 
-	net_tx_action_dealloc();
+	net_tx_action_dealloc(netbk);
 
-	gop.map = tx.map_ops;
-	gop.copy = tx.copy_ops + ARRAY_SIZE(tx.copy_ops);
-	while (NR_PENDING_REQS + XEN_NETIF_NR_SLOTS_MIN < MAX_PENDING_REQS
-	       && !list_empty(&net_schedule_list)) {
+	gop.map = netbk->tx.map_ops;
+	gop.copy = &netbk->tx.copy_op + 1;
+	while (nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN < MAX_PENDING_REQS
+	       && !list_empty(&netbk->tx.schedule_list)) {
 		/* Get a netif from the list with work to do. */
-		netif = poll_net_schedule_list();
+		netif = poll_net_schedule_list(netbk);
 		/*
 		 * This can sometimes happen because the test of
 		 * list_empty(net_schedule_list) at the top of the
@@ -1657,7 +1655,7 @@ static void net_tx_action(unsigned long
 			continue;
 		}
 
-		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+		pending_idx = netbk->tx.pending_ring[MASK_PEND_IDX(netbk->tx.pending_cons)];
 
 		data_len = (txreq.size > PKT_PROT_LEN &&
 			    ret < MAX_SKB_FRAGS) ?
@@ -1693,9 +1691,8 @@ static void net_tx_action(unsigned long
 			}
 		}
 
-		memcpy(&pending_tx_info[pending_idx].req,
-		       &txreq, sizeof(txreq));
-		pending_tx_info[pending_idx].netif = netif;
+		netbk->tx.pending_info[pending_idx].req = txreq;
+		netbk->tx.pending_info[pending_idx].netif = netif;
 		netbk_tx_cb(skb)->pending_idx[0] = pending_idx;
 		netbk_tx_cb(skb)->copy_slots = txslot - netbk->tx.slots;
 
@@ -1704,7 +1701,8 @@ static void net_tx_action(unsigned long
 
 		skb_shinfo(skb)->nr_frags = ret;
 		if (data_len < txreq.size) {
-			gnttab_set_map_op(gop.map++, idx_to_kaddr(pending_idx),
+			gnttab_set_map_op(gop.map++,
+					  idx_to_kaddr(netbk, pending_idx),
 					  GNTMAP_host_map | GNTMAP_readonly,
 					  txreq.gref, netif->domid);
 			skb_shinfo(skb)->nr_frags++;
@@ -1714,17 +1712,17 @@ static void net_tx_action(unsigned long
 		}
 		frag_set_pending_idx(skb_shinfo(skb)->frags, pending_idx);
 
-		__skb_queue_tail(&tx_queue, skb);
+		__skb_queue_tail(&netbk->tx.queue, skb);
 
-		pending_cons++;
+		netbk->tx.pending_cons++;
 
-		netbk_get_requests(netif, skb, netbk->tx.slots, &gop);
+		netbk_get_requests(netbk, netif, skb, &gop);
 
 		netif->tx.req_cons = i;
 		netif_schedule_work(netif);
 	}
 
-	if (skb_queue_empty(&tx_queue))
+	if (skb_queue_empty(&netbk->tx.queue))
 		goto out;
 
     /* NOTE: some maps may fail with GNTST_eagain, which could be successfully
@@ -1733,28 +1731,28 @@ static void net_tx_action(unsigned long
      * because it is unlikely that a network buffer will be paged out or shared,
      * and therefore it is unlikely to fail with GNTST_eagain. */
 	MULTI_grant_table_op(&mcl[0], GNTTABOP_copy, gop.copy,
-			     tx.copy_ops + ARRAY_SIZE(tx.copy_ops) - gop.copy);
+			     &netbk->tx.copy_op + 1 - gop.copy);
 	MULTI_grant_table_op(&mcl[1], GNTTABOP_map_grant_ref,
-			     tx.map_ops, gop.map - tx.map_ops);
+			     netbk->tx.map_ops, gop.map - netbk->tx.map_ops);
 	if (HYPERVISOR_multicall_check(mcl, 2, NULL))
 		BUG();
 
-	gop.map = tx.map_ops;
-	gop.copy = tx.copy_ops + ARRAY_SIZE(tx.copy_ops);
+	gop.map = netbk->tx.map_ops;
+	gop.copy = &netbk->tx.copy_op + 1;
 	gop.notify.head = NULL;
 	gop.notify.tail = NULL;
-	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+	while ((skb = __skb_dequeue(&netbk->tx.queue)) != NULL) {
 		struct net_device *dev;
 		netif_tx_request_t *txp;
 
 		pending_idx = netbk_tx_cb(skb)->pending_idx[0];
-		netif       = pending_tx_info[pending_idx].netif;
+		netif       = netbk->tx.pending_info[pending_idx].netif;
 		dev         = netif->dev;
-		txp         = &pending_tx_info[pending_idx].req;
+		txp         = &netbk->tx.pending_info[pending_idx].req;
 		data_len    = skb->len;
 
 		/* Check the remap/copy error code. */
-		if (unlikely(netbk_tx_check_gop(skb, &gop,
+		if (unlikely(netbk_tx_check_gop(netbk, skb, &gop,
 						data_len >= txp->size))) {
 			netdev_dbg(dev, "netback grant failed.\n");
 			skb_shinfo(skb)->nr_frags = 0;
@@ -1765,7 +1763,8 @@ static void net_tx_action(unsigned long
 
 		if (data_len < txp->size) {
 			memcpy(skb->data,
-			       (void *)(idx_to_kaddr(pending_idx) + txp->offset),
+			       (void *)(idx_to_kaddr(netbk, pending_idx)
+					+ txp->offset),
 			       data_len);
 			/* Append the packet payload as a fragment. */
 			txp->offset += data_len;
@@ -1779,7 +1778,7 @@ static void net_tx_action(unsigned long
 		else
 			skb->ip_summed = CHECKSUM_NONE;
 
-		netbk_fill_frags(skb);
+		netbk_fill_frags(netbk, skb);
 
 		/*
 		 * If the initial fragment was < PKT_PROT_LEN then
@@ -1808,41 +1807,44 @@ static void net_tx_action(unsigned long
 	}
 
 	if (gop.notify.head)
-		flush_notify_list(gop.notify.head, TX_IDX, tx.mcl,
-				  sizeof(tx) / sizeof(tx.mcl[0]));
+		flush_notify_list(gop.notify.head, TX_IDX, netbk->tx.mcl,
+				  sizeof(netbk->tx.map_ops)
+				  / sizeof(*netbk->tx.mcl));
 
  out:
 	if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
-	    !list_empty(&pending_inuse_head)) {
+	    !list_empty(&netbk->tx.pending_inuse_head)) {
 		struct netbk_tx_pending_inuse *oldest;
 
-		oldest = list_entry(pending_inuse_head.next,
+		oldest = list_entry(netbk->tx.pending_inuse_head.next,
 				    struct netbk_tx_pending_inuse, list);
-		mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
+		mod_timer(&netbk->tx.pending_timer, oldest->alloc_time + HZ);
 	}
 }
 
-static void netif_idx_release(u16 pending_idx)
+static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 {
-	static DEFINE_SPINLOCK(_lock);
 	unsigned long flags;
 
-	spin_lock_irqsave(&_lock, flags);
-	dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
+	spin_lock_irqsave(&netbk->tx.release_lock, flags);
+	netbk->tx.dealloc_ring[MASK_PEND_IDX(netbk->tx.dealloc_prod)] = pending_idx;
 	/* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
 	smp_wmb();
-	dealloc_prod++;
-	spin_unlock_irqrestore(&_lock, flags);
+	netbk->tx.dealloc_prod++;
+	spin_unlock_irqrestore(&netbk->tx.release_lock, flags);
 
-	tasklet_schedule(&net_tx_tasklet);
+	tasklet_schedule(&netbk->tx.tasklet);
 }
 
 static void netif_page_release(struct page *page, unsigned int order)
 {
-	int idx = netif_page_index(page);
+	struct xen_netbk *netbk;
+	unsigned int idx;
+
 	BUG_ON(order);
-	BUG_ON(idx < 0);
-	netif_idx_release(idx);
+	netif_get_page_ext(page, netbk, idx);
+	BUG_ON(!netbk);
+	netif_idx_release(netbk, idx);
 }
 
 irqreturn_t netif_be_int(int irq, void *dev_id)
@@ -1850,7 +1852,7 @@ irqreturn_t netif_be_int(int irq, void *
 	netif_t *netif = dev_id;
 
 	add_to_net_schedule_list_tail(netif);
-	maybe_schedule_tx_action();
+	maybe_schedule_tx_action(netif);
 
 	if (netif_schedulable(netif) && !netbk_queue_full(netif))
 		netif_wake_queue(netif->dev);
@@ -1923,33 +1925,38 @@ static netif_rx_response_t *make_rx_resp
 #ifdef NETBE_DEBUG_INTERRUPT
 static irqreturn_t netif_be_dbg(int irq, void *dev_id)
 {
-	struct list_head *ent;
 	netif_t *netif;
-	int i = 0;
+	unsigned int i = 0, group;
 
 	pr_alert("netif_schedule_list:\n");
-	spin_lock_irq(&net_schedule_list_lock);
 
-	list_for_each (ent, &net_schedule_list) {
-		netif = list_entry(ent, netif_t, list);
-		pr_alert(" %d: private(rx_req_cons=%08x "
-			 "rx_resp_prod=%08x\n",
-			 i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
-		pr_alert("   tx_req_cons=%08x tx_resp_prod=%08x)\n",
-			 netif->tx.req_cons, netif->tx.rsp_prod_pvt);
-		pr_alert("   shared(rx_req_prod=%08x "
-			 "rx_resp_prod=%08x\n",
-			 netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
-		pr_alert("   rx_event=%08x tx_req_prod=%08x\n",
-			 netif->rx.sring->rsp_event,
-			 netif->tx.sring->req_prod);
-		pr_alert("   tx_resp_prod=%08x, tx_event=%08x)\n",
-			 netif->tx.sring->rsp_prod,
-			 netif->tx.sring->rsp_event);
-		i++;
+	for (group = 0; group < netbk_nr_groups; ++group) {
+		struct xen_netbk *netbk = &xen_netbk[group];
+
+		spin_lock_irq(&netbk->tx.schedule_list_lock);
+
+		list_for_each_entry(netif, &netbk->tx.schedule_list, list) {
+			pr_alert(" %d: private(rx_req_cons=%08x "
+				 "rx_resp_prod=%08x\n", i,
+				 netif->rx.req_cons, netif->rx.rsp_prod_pvt);
+			pr_alert("   tx_req_cons=%08x tx_resp_prod=%08x)\n",
+				 netif->tx.req_cons, netif->tx.rsp_prod_pvt);
+			pr_alert("   shared(rx_req_prod=%08x "
+				 "rx_resp_prod=%08x\n",
+				 netif->rx.sring->req_prod,
+				 netif->rx.sring->rsp_prod);
+			pr_alert("   rx_event=%08x tx_req_prod=%08x\n",
+				 netif->rx.sring->rsp_event,
+				 netif->tx.sring->req_prod);
+			pr_alert("   tx_resp_prod=%08x, tx_event=%08x)\n",
+				 netif->tx.sring->rsp_prod,
+				 netif->tx.sring->rsp_event);
+			i++;
+		}
+
+		spin_unlock_irq(&netbk->tx.schedule_list_lock);
 	}
 
-	spin_unlock_irq(&net_schedule_list_lock);
 	pr_alert(" ** End of netif_schedule_list **\n");
 
 	return IRQ_HANDLED;
@@ -1964,7 +1971,8 @@ static struct irqaction netif_be_dbg_act
 
 static int __init netback_init(void)
 {
-	int i;
+	unsigned int i, group;
+	int rc;
 	struct page *page;
 
 	if (!is_running_on_xen())
@@ -1977,40 +1985,55 @@ static int __init netback_init(void)
 		max_tx_slots = XEN_NETIF_NR_SLOTS_MIN;
 	}
 
+	xen_netbk = vzalloc(netbk_nr_groups * sizeof(*xen_netbk));
+	if (!xen_netbk)
+		return -ENOMEM;
+
 	/* We can increase reservation by this much in net_rx_action(). */
-	balloon_update_driver_allowance(NET_RX_RING_SIZE);
+	balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE);
 
-	skb_queue_head_init(&rx_queue);
-	skb_queue_head_init(&tx_queue);
+	for (group = 0; group < netbk_nr_groups; group++) {
+		struct xen_netbk *netbk = &xen_netbk[group];
 
-	init_timer(&net_timer);
-	net_timer.data = 0;
-	net_timer.function = net_alarm;
-
-	init_timer(&netbk_tx_pending_timer);
-	netbk_tx_pending_timer.data = 0;
-	netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
-
-	mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
-	if (mmap_pages == NULL) {
-		pr_err("%s: out of memory\n", __FUNCTION__);
-		return -ENOMEM;
-	}
+		tasklet_init(&netbk->tx.tasklet, net_tx_action, group);
+		tasklet_init(&netbk->rx.tasklet, net_rx_action, group);
 
-	for (i = 0; i < MAX_PENDING_REQS; i++) {
-		page = mmap_pages[i];
-		SetPageForeign(page, netif_page_release);
-		netif_set_page_index(page, i);
-		INIT_LIST_HEAD(&pending_inuse[i].list);
-	}
+		skb_queue_head_init(&netbk->rx.queue);
+		skb_queue_head_init(&netbk->tx.queue);
+
+		init_timer(&netbk->rx.timer);
+		netbk->rx.timer.data = group;
+		netbk->rx.timer.function = net_alarm;
+
+		init_timer(&netbk->tx.pending_timer);
+		netbk->tx.pending_timer.data = group;
+		netbk->tx.pending_timer.function =
+			netbk_tx_pending_timeout;
 
-	pending_cons = 0;
-	pending_prod = MAX_PENDING_REQS;
-	for (i = 0; i < MAX_PENDING_REQS; i++)
-		pending_ring[i] = i;
+		netbk->tx.pending_prod = MAX_PENDING_REQS;
 
-	spin_lock_init(&net_schedule_list_lock);
-	INIT_LIST_HEAD(&net_schedule_list);
+		INIT_LIST_HEAD(&netbk->tx.pending_inuse_head);
+		INIT_LIST_HEAD(&netbk->tx.schedule_list);
+
+		spin_lock_init(&netbk->tx.schedule_list_lock);
+		spin_lock_init(&netbk->tx.release_lock);
+
+		netbk->tx.mmap_pages =
+			alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
+		if (netbk->tx.mmap_pages == NULL) {
+			pr_err("%s: out of memory\n", __func__);
+			rc = -ENOMEM;
+			goto failed_init;
+		}
+
+		for (i = 0; i < MAX_PENDING_REQS; i++) {
+			page = netbk->tx.mmap_pages[i];
+			SetPageForeign(page, netif_page_release);
+			netif_set_page_ext(page, group, i);
+			netbk->tx.pending_ring[i] = i;
+			INIT_LIST_HEAD(&netbk->tx.pending_inuse[i].list);
+		}
+	}
 
 	netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
 	if (MODPARM_copy_skb) {
@@ -2034,6 +2057,19 @@ static int __init netback_init(void)
 #endif
 
 	return 0;
+
+failed_init:
+	while (group-- > 0) {
+		struct xen_netbk *netbk = &xen_netbk[group];
+
+		free_empty_pages_and_pagevec(netbk->tx.mmap_pages,
+					     MAX_PENDING_REQS);
+	}
+	vfree(xen_netbk);
+	balloon_update_driver_allowance(-(long)netbk_nr_groups
+					* NET_RX_RING_SIZE);
+
+	return rc;
 }
 
 module_init(netback_init);
