From: Brian King
Date: Fri, 8 Jun 2007 19:05:16 +0000 (-0500)
Subject: ibmveth: Fix h_free_logical_lan error on pool resize
X-Git-Tag: v2.6.22-rc5~44^2~15
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4aa9c93e1c7911866c546651a5efbbf62914092e;p=linux-2.6

ibmveth: Fix h_free_logical_lan error on pool resize

When attempting to activate additional rx buffer pools on an ibmveth
interface that was not yet up, the error below was seen. The patch fixes
this by only closing and opening the interface to activate the resize if
the interface is already opened.

(drivers/net/ibmveth.c:597 ua:30000004) ERROR: h_free_logical_lan failed with fffffffffffffffc, continuing with close
Unable to handle kernel paging request for data at address 0x00000ff8
Faulting instruction address: 0xd0000000002540e0
Oops: Kernel access of bad area, sig: 11 [#1]
SMP NR_CPUS=128 NUMA PSERIES LPAR
Modules linked in: ip6t_REJECT xt_tcpudp ipt_REJECT xt_state iptable_mangle iptable_nat ip_nat iptable_filter ip6table_mangle ip_conntrack nfnetlink ip_tables ip6table_filter ip6_tables x_tables ipv6 apparmor aamatch_pcre loop dm_mod ibmveth sg ibmvscsic sd_mod scsi_mod
NIP: D0000000002540E0 LR: D0000000002540D4 CTR: 80000000001AF404
REGS: c00000001cd27870 TRAP: 0300   Not tainted  (2.6.16.46-0.4-ppc64)
MSR: 8000000000009032 CR: 24242422 XER: 00000007
DAR: 0000000000000FF8, DSISR: 0000000040000000
TASK = c00000001ca7b4e0[1636] 'sh' THREAD: c00000001cd24000 CPU: 0
GPR00: D0000000002540D4 C00000001CD27AF0 D000000000265650 C00000001C936500
GPR04: 8000000000009032 FFFFFFFFFFFFFFFF 0000000000000007 000000000002C2EF
GPR08: FFFFFFFFFFFFFFFF 0000000000000000 C000000000652A10 C000000000652AE0
GPR12: 0000000000004000 C0000000004A3300 00000000100A0000 0000000000000000
GPR16: 00000000100B8808 00000000100C0F60 0000000000000000 0000000010084878
GPR20: 0000000000000000 00000000100C0CB0 00000000100AF498 0000000000000002
GPR24: 00000000100BA488 C00000001C936760 D000000000258DD0 C00000001C936000
GPR28: 0000000000000000 C00000001C936500 D000000000265180 C00000001C936000
NIP [D0000000002540E0] .ibmveth_close+0xc8/0xf4 [ibmveth]
LR [D0000000002540D4] .ibmveth_close+0xbc/0xf4 [ibmveth]
Call Trace:
[C00000001CD27AF0] [D0000000002540D4] .ibmveth_close+0xbc/0xf4 [ibmveth] (unreliable)
[C00000001CD27B80] [D0000000002545FC] .veth_pool_store+0xd0/0x260 [ibmveth]
[C00000001CD27C40] [C00000000012E0E8] .sysfs_write_file+0x118/0x198
[C00000001CD27CF0] [C0000000000CDAF0] .vfs_write+0x130/0x218
[C00000001CD27D90] [C0000000000CE52C] .sys_write+0x4c/0x8c
[C00000001CD27E30] [C00000000000871C] syscall_exit+0x0/0x40
Instruction dump:
419affd8 2fa30000 419e0020 e93d0000 e89e8040 38a00255 e87e81b0 80c90018
48001531 e8410028 e93d00e0 7fa3eb78 f81d0430 4bfffdc9 38210090

Signed-off-by: Brian King
Signed-off-by: Jeff Garzik
---

diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 3bec0f733f..c04957a7df 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1243,16 +1243,19 @@ const char * buf, size_t count)
 
 	if (attr == &veth_active_attr) {
 		if (value && !pool->active) {
-			if(ibmveth_alloc_buffer_pool(pool)) {
-				ibmveth_error_printk("unable to alloc pool\n");
-				return -ENOMEM;
-			}
-			pool->active = 1;
-			adapter->pool_config = 1;
-			ibmveth_close(netdev);
-			adapter->pool_config = 0;
-			if ((rc = ibmveth_open(netdev)))
-				return rc;
+			if (netif_running(netdev)) {
+				if(ibmveth_alloc_buffer_pool(pool)) {
+					ibmveth_error_printk("unable to alloc pool\n");
+					return -ENOMEM;
+				}
+				pool->active = 1;
+				adapter->pool_config = 1;
+				ibmveth_close(netdev);
+				adapter->pool_config = 0;
+				if ((rc = ibmveth_open(netdev)))
+					return rc;
+			} else
+				pool->active = 1;
 		} else if (!value && pool->active) {
 			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
 			int i;
@@ -1281,23 +1284,29 @@ const char * buf, size_t count)
 		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
 			return -EINVAL;
 		else {
-			adapter->pool_config = 1;
-			ibmveth_close(netdev);
-			adapter->pool_config = 0;
-			pool->size = value;
-			if ((rc = ibmveth_open(netdev)))
-				return rc;
+			if (netif_running(netdev)) {
+				adapter->pool_config = 1;
+				ibmveth_close(netdev);
+				adapter->pool_config = 0;
+				pool->size = value;
+				if ((rc = ibmveth_open(netdev)))
+					return rc;
+			} else
+				pool->size = value;
 		}
 	} else if (attr == &veth_size_attr) {
 		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
 			return -EINVAL;
 		else {
-			adapter->pool_config = 1;
-			ibmveth_close(netdev);
-			adapter->pool_config = 0;
-			pool->buff_size = value;
-			if ((rc = ibmveth_open(netdev)))
-				return rc;
+			if (netif_running(netdev)) {
+				adapter->pool_config = 1;
+				ibmveth_close(netdev);
+				adapter->pool_config = 0;
+				pool->buff_size = value;
+				if ((rc = ibmveth_open(netdev)))
+					return rc;
+			} else
+				pool->buff_size = value;
 		}
 	}
 
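
[Editorial sketch, not part of the patch.] For readers following the change outside the driver source, the small standalone C model below illustrates the guard the patch adds: a pool attribute write only bounces the interface (close then reopen) when the interface is actually running; otherwise the new value is simply recorded and takes effect at the next open. The struct fields and "fake_" helpers are illustrative assumptions made for this sketch; netif_running(), ibmveth_close() and ibmveth_open() are the real symbols the patch uses, but they are only mimicked here.

/*
 * Minimal userspace model of the netif_running() guard added by this
 * patch.  Everything prefixed with "fake_" is an illustrative stand-in,
 * not ibmveth driver code.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_dev {
	bool running;		/* models netif_running(netdev) */
	int  pool_size;		/* models pool->size */
};

/* Stand-ins for ibmveth_close()/ibmveth_open(): reopening is what makes
 * a new pool configuration take effect on a live interface. */
static void fake_close(struct fake_dev *dev) { (void)dev; printf("close\n"); }
static int  fake_open(struct fake_dev *dev)  { (void)dev; printf("open\n"); return 0; }

/* Models the fixed pool-size branch of veth_pool_store(). */
static int fake_pool_resize(struct fake_dev *dev, int value)
{
	if (dev->running) {
		/* Interface is up: bounce it so the new size is used. */
		fake_close(dev);
		dev->pool_size = value;
		return fake_open(dev);
	}
	/* Interface is down: just record the value; the next open will
	 * allocate the pool with it.  Before the patch this path still
	 * called close, which oopsed on an interface that had never been
	 * brought up. */
	dev->pool_size = value;
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .running = false, .pool_size = 512 };

	fake_pool_resize(&dev, 1024);	/* down: no close/open */
	dev.running = true;
	fake_pool_resize(&dev, 2048);	/* up: close + open */
	printf("final pool_size=%d\n", dev.pool_size);
	return 0;
}

The patch applies the same netif_running() guard identically to all three sysfs branches (pool active, pool size, and buffer size).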