From varnish-bugs at varnish-cache.org Wed Jul 2 09:16:37 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 02 Jul 2014 09:16:37 -0000 Subject: [Varnish] #1542: Child (21196) Panic message:#012Assert error in Lck__Lock(), cache/cache_lck.c line 70 Message-ID: <047.f514766cb40ad2ae55372f101be48ab1@varnish-cache.org> #1542: Child (21196) Panic message:#012Assert error in Lck__Lock(), cache/cache_lck.c line 70 ---------------------------------+---------------------- Reporter: cmartinez | Type: defect Status: new | Priority: normal Milestone: Varnish 4.0 release | Component: varnishd Version: 4.0.1 | Severity: critical Keywords: | ---------------------------------+---------------------- we upgraded varnish from 4.0.0 to try to fix this issue but is still reproducing Varnish restarts his childs with signal 11. debug info: varnish> panic.show 200 Last panic at: Wed, 02 Jul 2014 08:19:22 GMT Assert error in Lck__Lock(), cache/cache_lck.c line 71: Condition((pthread_mutex_lock(&ilck->mtx)) == 0) not true. 
thread = (cache-worker) ident = Linux,2.6.32-431.el6.x86_64,x86_64,-smalloc,-smalloc,-hcritbit,epoll Backtrace: 0x43b0bd: /usr/sbin/varnishd() [0x43b0bd] 0x43b3cd: /usr/sbin/varnishd() [0x43b3cd] 0x436aba: /usr/sbin/varnishd(Lck__Lock+0xd2) [0x436aba] 0x4184e0: /usr/sbin/varnishd(VBO_waitlen+0x30) [0x4184e0] 0x4436aa: /usr/sbin/varnishd() [0x4436aa] 0x4442ae: /usr/sbin/varnishd(V1D_Deliver+0x597) [0x4442ae] 0x43f697: /usr/sbin/varnishd() [0x43f697] 0x4429f5: /usr/sbin/varnishd(CNT_Request+0x529) [0x4429f5] 0x433a40: /usr/sbin/varnishd(HTTP1_Session+0x429) [0x433a40] 0x44594b: /usr/sbin/varnishd() [0x44594b] req = 0x7f6e3e2bc020 { sp = 0x7f6ff981b8e0, vxid = 1084098135, step = R_STP_DELIVER, req_body = R_BODY_NONE, err_code = 200, err_reason = (null), restarts = 0, esi_level = 0 sp = 0x7f6ff981b8e0 { fd = 40, vxid = 10356310, client = 10.13.3.40 41544, step = S_STP_WORKING, }, worker = 0x7f6e55b4abf0 { ws = 0x7f6e55b4ae08 { id = "wrk", {s,f,r,e} = {0x7f6e55b4a3d0,0x7f6e55b4a3d0,(nil),+2048}, }, VCL::method = 0x0, VCL::return = deliver, }, ws = 0x7f6e3e2bc1b8 { id = "req", {s,f,r,e} = {0x7f6e3e2be010,+416,(nil),+57360}, }, (gdb) bt #0 0x00000000004184eb in VBO_waitlen () #1 0x00000000004436aa in v1d_dorange () #2 0x00000000004442ae in V1D_Deliver () #3 0x000000000043f697 in cnt_deliver () #4 0x00000000004429f5 in CNT_Request () #5 0x0000000000433a40 in HTTP1_Session () #6 0x000000000044594b in ses_req_pool_task () #7 0x0000000000445c16 in ses_sess_pool_task () #8 0x00000000004461c2 in SES_pool_accept_task () #9 0x000000000043e035 in Pool_Work_Thread () #10 0x0000000000456218 in wrk_thread_real () #11 0x0000000000456341 in WRK_thread () #12 0x000000352ba079d1 in start_thread (arg=0x7f6ef489d700) at pthread_create.c:301 #13 0x000000352b6e8b6d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115 (gdb) frame 12 #12 0x000000352ba079d1 in start_thread (arg=0x7f6ef489d700) at pthread_create.c:301 301 THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd)); (gdb) print pd 
$1 = (struct pthread *) 0x7f6ef489d700 (gdb) print *pd $2 = {{header = {tcb = 0x7f6ef489d700, dtv = 0x7f6f76c11850, self = 0x7f6ef489d700, multiple_threads = 1, gscope_flag = 0, sysinfo = 0, stack_guard = 5883901357024911704, pointer_guard = 13223282705651353577, vgetcpu_cache = {0, 0}, private_futex = 128, rtld_must_xmm_save = 0, __private_tm = {0x0, 0x0, 0x0, 0x0, 0x0}, __unused2 = 0, rtld_savespace_sse = {{{ 0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}, __padding = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, __padding = {0x7f6ef489d700, 0x7f6f76c11850, 0x7f6ef489d700, 0x1, 0x0, 0x51a7d11c26e8a158, 0xb782898f92ea47e9, 0x0, 0x0, 0x80, 0x0 }}, list = {next = 0x7f6ef48a99c0, prev = 0x7f6ef48919c0}, tid = 21040, pid = 20618, robust_prev = 0x7f6ef489d9e0, robust_head = {list = 0x7f6ef489d9e0, futex_offset = -32, list_op_pending = 0x0}, cleanup = 0x0, cleanup_jmp_buf = 0x7f6ef489cf30, cancelhandling = 0, flags = 0, specific_1stblock = {{seq = 1, data = 0x7f6ef489d6d0}, {seq = 1, data = 0x7f6ef489d6f8}, {seq = 0, data = 0x0}, {seq = 1, data = 0x7f6e5a352020}, {seq = 1, data = 0x0}, {seq = 1, data = 0x48354a}, {seq = 0, data = 0x0} }, specific = {0x7f6ef489da10, 0x0 }, specific_used = true, report_events = false, user_stack = false, stopped_start = false, parent_cancelhandling = 0, lock = 0, setxid_futex = 0, cpuclock_offset = 7204934909057508, joinid = 0x7f6ef489d700, result = 0x0, schedparam = {__sched_priority = 0}, schedpolicy = 0, start_routine = 0x45631a , arg = 0x7f70a0c0e300, eventbuf = {eventmask = {event_bits = {0, 0}}, eventnum = TD_ALL_EVENTS, 
eventdata = 0x0}, nextevent = 0x0, exc = {exception_class = 0, exception_cleanup = 0, private_1 = 0, private_2 = 0}, stackblock = 0x7f6ef4892000, stackblock_size = 49152, guardsize = 4096, reported_guardsize = 4096, tpp = 0x0, res = {retrans = 0, retry = 0, options = 0, nscount = 0, nsaddr_list = {{sin_family = 0, sin_port = 0, sin_addr = {s_addr = 0}, sin_zero = "\000\000\000\000\000\000\000"}, {sin_family = 0, sin_port = 0, sin_addr = {s_addr = 0}, sin_zero = "\000\000\000\000\000\000\000"}, {sin_family = 0, sin_port = 0, sin_addr = {s_addr = 0}, sin_zero = "\000\000\000\000\000\000\000"}}, id = 0, dnsrch = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, defdname = '\000' , pfcode = 0, ndots = 0, nsort = 0, ipv6_unavail = 0, unused = 0, sort_list = {{addr = { s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = { s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}}, qhook = 0, rhook = 0, res_h_errno = 0, _vcsock = 0, _flags = 0, _u = {pad = '\000' , _ext = {nscount = 0, nsmap = {0, 0, 0}, nssocks = {0, 0, 0}, nscount6 = 0, nsinit = 0, nsaddrs = {0x0, 0x0, 0x0}, initstamp = 0}}}, end_padding = 0x7f6ef489d700 ""} (gdb) print result $3 = '\000' -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 3 14:27:38 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 03 Jul 2014 14:27:38 -0000 Subject: [Varnish] #1539: Assert error in cnt_lookup(), cache/cache_req_fsm.c line 411: In-Reply-To: <047.9e25f0913e70f9b448183830fc7e3ec7@varnish-cache.org> References: <047.9e25f0913e70f9b448183830fc7e3ec7@varnish-cache.org> Message-ID: <062.dd4c03d1865206f89d380b78bf60e49f@varnish-cache.org> #1539: Assert error in cnt_lookup(), cache/cache_req_fsm.c line 411: -----------------------+---------------------------------- 
Reporter: shing6326 | Owner: Type: defect | Status: new Priority: normal | Milestone: Varnish 4.0 release Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: Keywords: | -----------------------+---------------------------------- Comment (by fgsch): Replying to [comment:1 scoof]: > Looks like a dup of #1522 It is. The ones in 1522 and this, regardless of the line, are asserting on the same check - HSH_Lookup returned an object, we return deliver but the object is dying. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 4 10:46:10 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 04 Jul 2014 10:46:10 -0000 Subject: [Varnish] #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 Message-ID: <047.3f011ce053d5cbd53e3230c73f3d7a63@varnish-cache.org> #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 ---------------------------------+---------------------- Reporter: WhizzBANG | Type: defect Status: new | Priority: high Milestone: Varnish 4.0 release | Component: varnishd Version: 4.0.1 | Severity: blocker Keywords: | ---------------------------------+---------------------- Hello, System: CentOS[[BR]] Kernel: 2.6.32-358.el6.x86_64[[BR]] Varnish: 4.0.1-1.el6 x86_64[[BR]] CPU: Intel(R) Xeon(R) CPU E5405 at 2.00GHz[[BR]] RAM: 32GB and 26GB for malloc varnish[[BR]] Configuration .vcl builded by me and based on working configuration in varnish3.0.3 {{{ varnish> panic.show 200 Last panic at: Thu, 03 Jul 2014 15:49:42 GMT Assert error in Lck_Delete(), cache/cache_lck.c line 198: Condition((pthread_mutex_destroy(&ilck->mtx)) == 0) not true. 
thread = (cache-worker) ident = Linux,2.6.32-358.el6.x86_64,x86_64,-smalloc,-smalloc,-hcritbit,epoll }}} backtrace from core-dump: {{{ (gdb) bt #0 0x0000000000436a04 in Lck__Lock () #1 0x00000000004184e0 in VBO_waitlen () #2 0x00000000004436aa in v1d_dorange () #3 0x00000000004442ae in V1D_Deliver () #4 0x000000000043f697 in cnt_deliver () #5 0x00000000004429f5 in CNT_Request () #6 0x0000000000433a40 in HTTP1_Session () #7 0x000000000044594b in ses_req_pool_task () #8 0x0000000000445c16 in ses_sess_pool_task () #9 0x00000000004461c2 in SES_pool_accept_task () #10 0x000000000043e035 in Pool_Work_Thread () #11 0x0000000000456218 in wrk_thread_real () #12 0x0000000000456341 in WRK_thread () #13 0x000000384bc07851 in start_thread (arg=0x7f029968f700) at pthread_create.c:301 #14 0x0000003f762e890d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 7 10:06:53 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 07 Jul 2014 10:06:53 -0000 Subject: [Varnish] #1542: Child (21196) Panic message:#012Assert error in Lck__Lock(), cache/cache_lck.c line 70 In-Reply-To: <047.f514766cb40ad2ae55372f101be48ab1@varnish-cache.org> References: <047.f514766cb40ad2ae55372f101be48ab1@varnish-cache.org> Message-ID: <062.97b985375d0983e94dfba06e9be11465@varnish-cache.org> #1542: Child (21196) Panic message:#012Assert error in Lck__Lock(), cache/cache_lck.c line 70 -----------------------+---------------------------------- Reporter: cmartinez | Owner: Type: defect | Status: new Priority: normal | Milestone: Varnish 4.0 release Component: varnishd | Version: 4.0.1 Severity: critical | Resolution: Keywords: | -----------------------+---------------------------------- Description changed by lkarsten: Old description: > we upgraded varnish from 4.0.0 to try to fix this issue but is still > reproducing > > Varnish restarts his childs with signal 11. 
> > debug info: > > varnish> panic.show > 200 > Last panic at: Wed, 02 Jul 2014 08:19:22 GMT > Assert error in Lck__Lock(), cache/cache_lck.c line 71: > Condition((pthread_mutex_lock(&ilck->mtx)) == 0) not true. > thread = (cache-worker) > ident = > Linux,2.6.32-431.el6.x86_64,x86_64,-smalloc,-smalloc,-hcritbit,epoll > Backtrace: > 0x43b0bd: /usr/sbin/varnishd() [0x43b0bd] > 0x43b3cd: /usr/sbin/varnishd() [0x43b3cd] > 0x436aba: /usr/sbin/varnishd(Lck__Lock+0xd2) [0x436aba] > 0x4184e0: /usr/sbin/varnishd(VBO_waitlen+0x30) [0x4184e0] > 0x4436aa: /usr/sbin/varnishd() [0x4436aa] > 0x4442ae: /usr/sbin/varnishd(V1D_Deliver+0x597) [0x4442ae] > 0x43f697: /usr/sbin/varnishd() [0x43f697] > 0x4429f5: /usr/sbin/varnishd(CNT_Request+0x529) [0x4429f5] > 0x433a40: /usr/sbin/varnishd(HTTP1_Session+0x429) [0x433a40] > 0x44594b: /usr/sbin/varnishd() [0x44594b] > req = 0x7f6e3e2bc020 { > sp = 0x7f6ff981b8e0, vxid = 1084098135, step = R_STP_DELIVER, > req_body = R_BODY_NONE, > err_code = 200, err_reason = (null), > restarts = 0, esi_level = 0 > sp = 0x7f6ff981b8e0 { > fd = 40, vxid = 10356310, > client = 10.13.3.40 41544, > step = S_STP_WORKING, > }, > worker = 0x7f6e55b4abf0 { > ws = 0x7f6e55b4ae08 { > id = "wrk", > {s,f,r,e} = {0x7f6e55b4a3d0,0x7f6e55b4a3d0,(nil),+2048}, > }, > VCL::method = 0x0, > VCL::return = deliver, > }, > ws = 0x7f6e3e2bc1b8 { > id = "req", > {s,f,r,e} = {0x7f6e3e2be010,+416,(nil),+57360}, > }, > > (gdb) bt > #0 0x00000000004184eb in VBO_waitlen () > #1 0x00000000004436aa in v1d_dorange () > #2 0x00000000004442ae in V1D_Deliver () > #3 0x000000000043f697 in cnt_deliver () > #4 0x00000000004429f5 in CNT_Request () > #5 0x0000000000433a40 in HTTP1_Session () > #6 0x000000000044594b in ses_req_pool_task () > #7 0x0000000000445c16 in ses_sess_pool_task () > #8 0x00000000004461c2 in SES_pool_accept_task () > #9 0x000000000043e035 in Pool_Work_Thread () > #10 0x0000000000456218 in wrk_thread_real () > #11 0x0000000000456341 in WRK_thread () > #12 0x000000352ba079d1 
in start_thread (arg=0x7f6ef489d700) at > pthread_create.c:301 > #13 0x000000352b6e8b6d in clone () at > ../sysdeps/unix/sysv/linux/x86_64/clone.S:115 > (gdb) frame 12 > #12 0x000000352ba079d1 in start_thread (arg=0x7f6ef489d700) at > pthread_create.c:301 > 301 THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd)); > (gdb) print pd > $1 = (struct pthread *) 0x7f6ef489d700 > (gdb) print *pd > $2 = {{header = {tcb = 0x7f6ef489d700, dtv = 0x7f6f76c11850, self = > 0x7f6ef489d700, multiple_threads = 1, gscope_flag = 0, > sysinfo = 0, stack_guard = 5883901357024911704, pointer_guard = > 13223282705651353577, vgetcpu_cache = {0, 0}, > private_futex = 128, rtld_must_xmm_save = 0, __private_tm = {0x0, > 0x0, 0x0, 0x0, 0x0}, __unused2 = 0, rtld_savespace_sse = {{{ > 0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, > 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, > 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, > 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, > 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, > 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, > 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, > {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}, __padding = { > 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, __padding = > {0x7f6ef489d700, 0x7f6f76c11850, 0x7f6ef489d700, 0x1, 0x0, > 0x51a7d11c26e8a158, 0xb782898f92ea47e9, 0x0, 0x0, 0x80, 0x0 > }}, list = {next = 0x7f6ef48a99c0, > prev = 0x7f6ef48919c0}, tid = 21040, pid = 20618, robust_prev = > 0x7f6ef489d9e0, robust_head = {list = 0x7f6ef489d9e0, > futex_offset = -32, list_op_pending = 0x0}, cleanup = 0x0, > cleanup_jmp_buf = 0x7f6ef489cf30, cancelhandling = 0, flags = 0, > specific_1stblock = {{seq = 1, data = 0x7f6ef489d6d0}, {seq = 1, data = > 0x7f6ef489d6f8}, {seq = 0, data = 0x0}, {seq = 1, > data = 0x7f6e5a352020}, {seq = 1, data = 0x0}, {seq = 1, data = > 0x48354a}, {seq = 0, data = 0x0} }, > specific = {0x7f6ef489da10, 0x0 }, specific_used 
= > true, report_events = false, user_stack = false, > stopped_start = false, parent_cancelhandling = 0, lock = 0, > setxid_futex = 0, cpuclock_offset = 7204934909057508, > joinid = 0x7f6ef489d700, result = 0x0, schedparam = {__sched_priority = > 0}, schedpolicy = 0, > start_routine = 0x45631a , arg = 0x7f70a0c0e300, eventbuf = > {eventmask = {event_bits = {0, 0}}, > eventnum = TD_ALL_EVENTS, eventdata = 0x0}, nextevent = 0x0, exc = > {exception_class = 0, exception_cleanup = 0, private_1 = 0, > private_2 = 0}, stackblock = 0x7f6ef4892000, stackblock_size = 49152, > guardsize = 4096, reported_guardsize = 4096, tpp = 0x0, > res = {retrans = 0, retry = 0, options = 0, nscount = 0, nsaddr_list = > {{sin_family = 0, sin_port = 0, sin_addr = {s_addr = 0}, > sin_zero = "\000\000\000\000\000\000\000"}, {sin_family = 0, > sin_port = 0, sin_addr = {s_addr = 0}, > sin_zero = "\000\000\000\000\000\000\000"}, {sin_family = 0, > sin_port = 0, sin_addr = {s_addr = 0}, > sin_zero = "\000\000\000\000\000\000\000"}}, id = 0, dnsrch = > {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, > defdname = '\000' , pfcode = 0, ndots = 0, nsort = > 0, ipv6_unavail = 0, unused = 0, sort_list = {{addr = { > s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr > = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, > mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, > mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = { > s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr > = {s_addr = 0}, mask = 0}}, qhook = 0, rhook = 0, > res_h_errno = 0, _vcsock = 0, _flags = 0, _u = {pad = '\000' 51 times>, _ext = {nscount = 0, nsmap = {0, 0, 0}, > nssocks = {0, 0, 0}, nscount6 = 0, nsinit = 0, nsaddrs = {0x0, > 0x0, 0x0}, initstamp = 0}}}, end_padding = 0x7f6ef489d700 ""} > > (gdb) print result > $3 = '\000' New description: we upgraded varnish from 4.0.0 to try to fix this issue but is still reproducing Varnish restarts his childs with signal 11. 
debug info: {{{ varnish> panic.show 200 Last panic at: Wed, 02 Jul 2014 08:19:22 GMT Assert error in Lck__Lock(), cache/cache_lck.c line 71: Condition((pthread_mutex_lock(&ilck->mtx)) == 0) not true. thread = (cache-worker) ident = Linux,2.6.32-431.el6.x86_64,x86_64,-smalloc,-smalloc,-hcritbit,epoll Backtrace: 0x43b0bd: /usr/sbin/varnishd() [0x43b0bd] 0x43b3cd: /usr/sbin/varnishd() [0x43b3cd] 0x436aba: /usr/sbin/varnishd(Lck__Lock+0xd2) [0x436aba] 0x4184e0: /usr/sbin/varnishd(VBO_waitlen+0x30) [0x4184e0] 0x4436aa: /usr/sbin/varnishd() [0x4436aa] 0x4442ae: /usr/sbin/varnishd(V1D_Deliver+0x597) [0x4442ae] 0x43f697: /usr/sbin/varnishd() [0x43f697] 0x4429f5: /usr/sbin/varnishd(CNT_Request+0x529) [0x4429f5] 0x433a40: /usr/sbin/varnishd(HTTP1_Session+0x429) [0x433a40] 0x44594b: /usr/sbin/varnishd() [0x44594b] req = 0x7f6e3e2bc020 { sp = 0x7f6ff981b8e0, vxid = 1084098135, step = R_STP_DELIVER, req_body = R_BODY_NONE, err_code = 200, err_reason = (null), restarts = 0, esi_level = 0 sp = 0x7f6ff981b8e0 { fd = 40, vxid = 10356310, client = 10.13.3.40 41544, step = S_STP_WORKING, }, worker = 0x7f6e55b4abf0 { ws = 0x7f6e55b4ae08 { id = "wrk", {s,f,r,e} = {0x7f6e55b4a3d0,0x7f6e55b4a3d0,(nil),+2048}, }, VCL::method = 0x0, VCL::return = deliver, }, ws = 0x7f6e3e2bc1b8 { id = "req", {s,f,r,e} = {0x7f6e3e2be010,+416,(nil),+57360}, }, }}} {{{ (gdb) bt #0 0x00000000004184eb in VBO_waitlen () #1 0x00000000004436aa in v1d_dorange () #2 0x00000000004442ae in V1D_Deliver () #3 0x000000000043f697 in cnt_deliver () #4 0x00000000004429f5 in CNT_Request () #5 0x0000000000433a40 in HTTP1_Session () #6 0x000000000044594b in ses_req_pool_task () #7 0x0000000000445c16 in ses_sess_pool_task () #8 0x00000000004461c2 in SES_pool_accept_task () #9 0x000000000043e035 in Pool_Work_Thread () #10 0x0000000000456218 in wrk_thread_real () #11 0x0000000000456341 in WRK_thread () #12 0x000000352ba079d1 in start_thread (arg=0x7f6ef489d700) at pthread_create.c:301 #13 0x000000352b6e8b6d in clone () at 
../sysdeps/unix/sysv/linux/x86_64/clone.S:115 (gdb) frame 12 #12 0x000000352ba079d1 in start_thread (arg=0x7f6ef489d700) at pthread_create.c:301 301 THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd)); (gdb) print pd $1 = (struct pthread *) 0x7f6ef489d700 (gdb) print *pd $2 = {{header = {tcb = 0x7f6ef489d700, dtv = 0x7f6f76c11850, self = 0x7f6ef489d700, multiple_threads = 1, gscope_flag = 0, sysinfo = 0, stack_guard = 5883901357024911704, pointer_guard = 13223282705651353577, vgetcpu_cache = {0, 0}, private_futex = 128, rtld_must_xmm_save = 0, __private_tm = {0x0, 0x0, 0x0, 0x0, 0x0}, __unused2 = 0, rtld_savespace_sse = {{{ 0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}, __padding = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, __padding = {0x7f6ef489d700, 0x7f6f76c11850, 0x7f6ef489d700, 0x1, 0x0, 0x51a7d11c26e8a158, 0xb782898f92ea47e9, 0x0, 0x0, 0x80, 0x0 }}, list = {next = 0x7f6ef48a99c0, prev = 0x7f6ef48919c0}, tid = 21040, pid = 20618, robust_prev = 0x7f6ef489d9e0, robust_head = {list = 0x7f6ef489d9e0, futex_offset = -32, list_op_pending = 0x0}, cleanup = 0x0, cleanup_jmp_buf = 0x7f6ef489cf30, cancelhandling = 0, flags = 0, specific_1stblock = {{seq = 1, data = 0x7f6ef489d6d0}, {seq = 1, data = 0x7f6ef489d6f8}, {seq = 0, data = 0x0}, {seq = 1, data = 0x7f6e5a352020}, {seq = 1, data = 0x0}, {seq = 1, data = 0x48354a}, {seq = 0, data = 0x0} }, specific = {0x7f6ef489da10, 0x0 }, specific_used = true, report_events = false, user_stack = false, stopped_start = false, parent_cancelhandling = 0, lock = 0, setxid_futex = 0, cpuclock_offset = 7204934909057508, joinid 
= 0x7f6ef489d700, result = 0x0, schedparam = {__sched_priority = 0}, schedpolicy = 0, start_routine = 0x45631a , arg = 0x7f70a0c0e300, eventbuf = {eventmask = {event_bits = {0, 0}}, eventnum = TD_ALL_EVENTS, eventdata = 0x0}, nextevent = 0x0, exc = {exception_class = 0, exception_cleanup = 0, private_1 = 0, private_2 = 0}, stackblock = 0x7f6ef4892000, stackblock_size = 49152, guardsize = 4096, reported_guardsize = 4096, tpp = 0x0, res = {retrans = 0, retry = 0, options = 0, nscount = 0, nsaddr_list = {{sin_family = 0, sin_port = 0, sin_addr = {s_addr = 0}, sin_zero = "\000\000\000\000\000\000\000"}, {sin_family = 0, sin_port = 0, sin_addr = {s_addr = 0}, sin_zero = "\000\000\000\000\000\000\000"}, {sin_family = 0, sin_port = 0, sin_addr = {s_addr = 0}, sin_zero = "\000\000\000\000\000\000\000"}}, id = 0, dnsrch = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, defdname = '\000' , pfcode = 0, ndots = 0, nsort = 0, ipv6_unavail = 0, unused = 0, sort_list = {{addr = { s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = { s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}, {addr = {s_addr = 0}, mask = 0}}, qhook = 0, rhook = 0, res_h_errno = 0, _vcsock = 0, _flags = 0, _u = {pad = '\000' , _ext = {nscount = 0, nsmap = {0, 0, 0}, nssocks = {0, 0, 0}, nscount6 = 0, nsinit = 0, nsaddrs = {0x0, 0x0, 0x0}, initstamp = 0}}}, end_padding = 0x7f6ef489d700 ""} (gdb) print result $3 = '\000' }}} -- -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 7 10:10:13 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 07 Jul 2014 10:10:13 -0000 Subject: [Varnish] #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 In-Reply-To: <047.3f011ce053d5cbd53e3230c73f3d7a63@varnish-cache.org> References: 
<047.3f011ce053d5cbd53e3230c73f3d7a63@varnish-cache.org> Message-ID: <062.245a4b498826dc51efaaaf7ddcdf2ed2@varnish-cache.org> #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 -----------------------+---------------------------------- Reporter: WhizzBANG | Owner: Type: defect | Status: Need info Priority: high | Milestone: Varnish 4.0 release Component: varnishd | Version: 4.0.1 Severity: blocker | Resolution: Keywords: | -----------------------+---------------------------------- Changes (by slink): * status: new => Need info Comment: This is likely to be fixed with b2b6e329da78e465720bec9a26a905e8a77cab92 Could you check if this still happens in master? -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 7 10:13:05 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 07 Jul 2014 10:13:05 -0000 Subject: [Varnish] #1506: Make better use of Content-Length information: Avoid chunked responses, more control over Range handling In-Reply-To: <050.7446d258f6b1af112a619a4b721885a7@varnish-cache.org> References: <050.7446d258f6b1af112a619a4b721885a7@varnish-cache.org> Message-ID: <065.aaf0393c5a989be6e2be79f89e7e3840@varnish-cache.org> #1506: Make better use of Content-Length information: Avoid chunked responses, more control over Range handling --------------------------+---------------------------------- Reporter: DonMacAskill | Owner: phk Type: defect | Status: new Priority: normal | Milestone: Varnish 4.0 release Component: varnishd | Version: 4.0.0 Severity: critical | Resolution: Keywords: | --------------------------+---------------------------------- Comment (by slink): For completeness, I am adding the patches I had prepared. phks solution is likely to look completely different. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 10 14:28:46 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 10 Jul 2014 14:28:46 -0000 Subject: [Varnish] #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 In-Reply-To: <047.3f011ce053d5cbd53e3230c73f3d7a63@varnish-cache.org> References: <047.3f011ce053d5cbd53e3230c73f3d7a63@varnish-cache.org> Message-ID: <062.875b67fbdf59b6c5c85c1ac1a3aa68b1@varnish-cache.org> #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 -----------------------+---------------------------------- Reporter: WhizzBANG | Owner: Type: defect | Status: Need info Priority: high | Milestone: Varnish 4.0 release Component: varnishd | Version: 4.0.1 Severity: blocker | Resolution: Keywords: | -----------------------+---------------------------------- Comment (by WhizzBANG): While I compiled varnish-4.0.1 I get error: vmod_std_querysort.c: In function 'vmod_querysort': vmod_std_querysort.c:63: error: 'param'
may be used uninitialized in this function I found: https://www.varnish-cache.org/trac/ticket/1533 but now I get: vmod_std_querysort.c: In function 'vmod_querysort': vmod_std_querysort.c:88: error: implicit declaration of function 'WS_MarkOverflow' -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Sat Jul 12 23:51:47 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Sat, 12 Jul 2014 23:51:47 -0000 Subject: [Varnish] #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 In-Reply-To: <047.3f011ce053d5cbd53e3230c73f3d7a63@varnish-cache.org> References: <047.3f011ce053d5cbd53e3230c73f3d7a63@varnish-cache.org> Message-ID: <062.2bb521842cc0406b41cd09c21cef2b26@varnish-cache.org> #1543: Panic message: Assert error in Lck_Delete(), cache/cache_lck.c line 198 -----------------------+---------------------------------- Reporter: WhizzBANG | Owner: Type: defect | Status: Need info Priority: high | Milestone: Varnish 4.0 release Component: varnishd | Version: 4.0.1 Severity: blocker | Resolution: Keywords: | -----------------------+---------------------------------- Comment (by fgsch): Replying to [comment:2 WhizzBANG]: > While I compiled varnish-4.0.1 I get error: > > > {{{ > vmod_std_querysort.c: In function 'vmod_querysort': > vmod_std_querysort.c:63: error: 'param' may be used uninitialized in this function > }}} > > > I found: > https://www.varnish-cache.org/trac/ticket/1533 > > but now I get: > > > {{{ > vmod_std_querysort.c: In function 'vmod_querysort': > vmod_std_querysort.c:88: error: implicit declaration of function 'WS_MarkOverflow' > }}} This was fixed in 2d156c6c6fdbc0eb97c11ec03badc7eb7f459438. Can you check trunk or cherry pick that commit?
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 07:23:00 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 07:23:00 -0000 Subject: [Varnish] #1544: segfault at 40 error 4 in varnishd Message-ID: <049.7736650ccfff554fe37de96b015b88fc@varnish-cache.org> #1544: segfault at 40 error 4 in varnishd -------------------------+---------------------- Reporter: g.gerritsen | Type: defect Status: new | Priority: normal Milestone: | Component: varnishd Version: 4.0.1 | Severity: normal Keywords: | -------------------------+---------------------- Had two segfaults, varnish recovered without any problem. no vmods except std and directors are in use. no inline c is used. {{{ varnishd[2604]: segfault at 40 ip 00000000004184eb sp 00007f18b8b2b1a0 error 4 in varnishd[400000+a0000] varnishd[31577]: segfault at 40 ip 00000000004184eb sp 00007f18b926e1a0 error 4 in varnishd[400000+a0000] }}} {{{ varnishd[1747]: Child (30897) died signal=11 varnishd[1747]: child (59176) Started varnishd[1747]: Child (59176) said Child starts }}} centos 6.4 -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 11:34:06 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 11:34:06 -0000 Subject: [Varnish] #1545: varnish-4.0 RHEL/CentOS repository broken Message-ID: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> #1545: varnish-4.0 RHEL/CentOS repository broken ----------------------+-------------------- Reporter: zviratko | Type: defect Status: new | Priority: normal Milestone: | Component: build Version: unknown | Severity: normal Keywords: | ----------------------+-------------------- I installed the varnish repository via https://repo.varnish-cache.org/redhat/varnish-4.0.el6.rpm But all varnish packages are not showing in yum, only a subset of them. Looks like all packages are not indexed in the repository. 
Repository configuration looks OK and it is enabled (as evident by the presence of varnish-debuginfo-4.0.1 package only present in it) http://pastebin.com/JenyPYe7 -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 11:45:04 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 11:45:04 -0000 Subject: [Varnish] #1545: varnish-4.0 RHEL/CentOS repository broken In-Reply-To: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> References: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> Message-ID: <061.b388ca015d669791534e76683894a6ef@varnish-cache.org> #1545: varnish-4.0 RHEL/CentOS repository broken ----------------------+---------------------- Reporter: zviratko | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: build | Version: unknown Severity: normal | Resolution: Keywords: | ----------------------+---------------------- Description changed by lkarsten: Old description: > I installed the varnish repository via > https://repo.varnish-cache.org/redhat/varnish-4.0.el6.rpm > > But all varnish packages are not showing in yum, only a subset of them. > Looks like all packages are not indexed in the repository. Repository > configuration looks OK and it is enabled (as evident by the presence of > varnish-debuginfo-4.0.1 package only present in it) > > http://pastebin.com/JenyPYe7 New description: I installed the varnish repository via https://repo.varnish-cache.org/redhat/varnish-4.0.el6.rpm But all varnish packages are not showing in yum, only a subset of them. Looks like all packages are not indexed in the repository. 
Repository configuration looks OK and it is enabled (as evident by the presence of varnish-debuginfo-4.0.1 package only present in it) {{{ [root at lbdevel.devel yum.repos.d]# yum --enablerepo=varnish-4.0 search --showduplicates varnish Loaded plugins: fastestmirror, protectbase, versionlock Loading mirror speeds from cached hostfile * epel: mirror.hosting90.cz 377 packages excluded due to repository protections epel/pkgtags | 1.0 MB 00:00 ==================================================================================== N/S Matched: varnish ==================================================================================== varnish-debuginfo-4.0.0-0.20131129tp1.el6.x86_64 : Debug information for package varnish varnish-debuginfo-4.0.0-0.20140328beta1.el6.x86_64 : Debug information for package varnish varnish-debuginfo-4.0.0-1.el6.x86_64 : Debug information for package varnish varnish-debuginfo-4.0.1-1.el6.x86_64 : Debug information for package varnish varnish-docs-2.1.5-5.el6.x86_64 : Documentation files for varnish varnish-docs-4.0.0-0.20131129tp1.el6.x86_64 : Documentation files for varnish varnish-docs-4.0.0-0.20140328beta1.el6.x86_64 : Documentation files for varnish varnish-docs-4.0.0-1.el6.x86_64 : Documentation files for varnish varnish-docs-4.0.1-1.el6.x86_64 : Documentation files for varnish varnish-libs-3.0.4-1.el6.x86_64 : Libraries for varnish varnish-libs-4.0.0-1.el6.x86_64 : Libraries for varnish varnish-libs-devel-2.1.5-5.el6.i686 : Development files for varnish-libs varnish-libs-devel-2.1.5-5.el6.x86_64 : Development files for varnish-libs varnish-libs-devel-4.0.0-0.20131129tp1.el6.x86_64 : Development files for varnish-libs varnish-libs-devel-4.0.0-0.20140328beta1.el6.x86_64 : Development files for varnish-libs varnish-libs-devel-4.0.0-1.el6.x86_64 : Development files for varnish-libs varnish-libs-devel-4.0.1-1.el6.x86_64 : Development files for varnish-libs varnish-release-4.0-3.el6.noarch : Varnish 4.0 package repository configuration 
varnish-3.0.4-1.el6.x86_64 : High-performance HTTP accelerator }}} -- -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 12:01:57 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 12:01:57 -0000 Subject: [Varnish] #1545: varnish-4.0 RHEL/CentOS repository broken In-Reply-To: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> References: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> Message-ID: <061.184b5a9857f39896ff018b394d78503d@varnish-cache.org> #1545: varnish-4.0 RHEL/CentOS repository broken ----------------------+---------------------- Reporter: zviratko | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: build | Version: unknown Severity: normal | Resolution: Keywords: | ----------------------+---------------------- Comment (by zviratko): This is from a different "clean" machine that only had 3.0.4 installed from a copy in my own local repository {{{ [root at ap01deploy.deploy ~]# rpm --nosignature -i https://repo.varnish- cache.org/redhat/varnish-4.0.el6.rpm [root at ap01deploy.deploy ~]# yum list --showduplicates varnish Loaded plugins: fastestmirror, protectbase, versionlock Loading mirror speeds from cached hostfile varnish-4.0 | 951 B 00:00 varnish-4.0/primary | 3.8 kB 00:00 varnish-4.0 20/20 10 packages excluded due to repository protections Installed Packages varnish.x86_64 3.0.4-1.el6 @lmcsys Available Packages varnish.x86_64 3.0.4-1.el6 lmcsys }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 12:10:41 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 12:10:41 -0000 Subject: [Varnish] #1545: varnish-4.0 RHEL/CentOS repository broken In-Reply-To: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> References: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> Message-ID: <061.ec4b6ecffffea9e15527b44f9b22f31f@varnish-cache.org> #1545: 
varnish-4.0 RHEL/CentOS repository broken ----------------------+---------------------- Reporter: zviratko | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: build | Version: unknown Severity: normal | Resolution: Keywords: | ----------------------+---------------------- Comment (by zviratko): Credits go to fgs at IRC problem was caused by yum protectbase plugin and the varnish-3.0.4 package in my own repository. adding protect=1 to the varnish repository configuration fixed the problem. Maybe add a notice to this page in case someone else did the same thing: https://www.varnish-cache.org/installation/redhat Sorry, It was my local problem after all. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 12:21:38 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 12:21:38 -0000 Subject: [Varnish] #1545: varnish-4.0 RHEL/CentOS repository broken In-Reply-To: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> References: <046.67269b55475cab056c3c17cc85573542@varnish-cache.org> Message-ID: <061.a46dd07eff8af9b8c141ee92e4896eac@varnish-cache.org> #1545: varnish-4.0 RHEL/CentOS repository broken ----------------------+------------------------- Reporter: zviratko | Owner: Type: defect | Status: closed Priority: normal | Milestone: Component: build | Version: unknown Severity: normal | Resolution: worksforme Keywords: | ----------------------+------------------------- Changes (by lkarsten): * status: new => closed * resolution: => worksforme Comment: Based on the discussion on IRC, it seems like the reporters use of protect=1 on other enabled repositories provoked this. Closing. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 14:16:30 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 14:16:30 -0000 Subject: [Varnish] #1546: vrt.h contains various director related declarations that are no longer referenced. Message-ID: <043.0159882f25c8535ca251116021ea50be@varnish-cache.org> #1546: vrt.h contains various director related declarations that are no longer referenced. ----------------------+------------------- Reporter: daghf | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: trunk Severity: normal | Keywords: ----------------------+------------------- In particular, {{{ struct vrt_dir_random_entry struct vrt_dir_random struct vrt_dir_dns_entry struct vrt_dir_dns }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 14 15:23:42 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 14 Jul 2014 15:23:42 -0000 Subject: [Varnish] #1537: Varnish .deb for wheezy is not compiled against/does not depend on jemalloc In-Reply-To: <044.e095537cfdf43d97bf18575d45c24ffd@varnish-cache.org> References: <044.e095537cfdf43d97bf18575d45c24ffd@varnish-cache.org> Message-ID: <059.f359e1114241707919da201487be88eb@varnish-cache.org> #1537: Varnish .deb for wheezy is not compiled against/does not depend on jemalloc -----------------------+----------------------- Reporter: joakim | Owner: lkarsten Type: defect | Status: new Priority: high | Milestone: Component: packaging | Version: 4.0.1 Severity: normal | Resolution: Keywords: | -----------------------+----------------------- Comment (by lkarsten): New packages are being built now. I'll check them in the morning and upload to the repository when everything looks good. Thanks for reporting this. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 15 11:53:44 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 15 Jul 2014 11:53:44 -0000 Subject: [Varnish] #1537: Varnish .deb for wheezy is not compiled against/does not depend on jemalloc In-Reply-To: <044.e095537cfdf43d97bf18575d45c24ffd@varnish-cache.org> References: <044.e095537cfdf43d97bf18575d45c24ffd@varnish-cache.org> Message-ID: <059.b08200834529c369b9be1791dbf90cbc@varnish-cache.org> #1537: Varnish .deb for wheezy is not compiled against/does not depend on jemalloc -----------------------+----------------------- Reporter: joakim | Owner: lkarsten Type: defect | Status: closed Priority: high | Milestone: Component: packaging | Version: 4.0.1 Severity: normal | Resolution: fixed Keywords: | -----------------------+----------------------- Changes (by lkarsten): * status: new => closed * resolution: => fixed Comment: Updated packages that depend and use jemalloc should be available in the repository now. Closing. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 15 12:00:27 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 15 Jul 2014 12:00:27 -0000 Subject: [Varnish] #1534: is groff really required for varnish 4+ ? In-Reply-To: <049.1ec3b1ed6de1bca9c9bcd13d03edaaa2@varnish-cache.org> References: <049.1ec3b1ed6de1bca9c9bcd13d03edaaa2@varnish-cache.org> Message-ID: <064.3f6ed40ec4ea55cad781d6f83cf1330f@varnish-cache.org> #1534: is groff really required for varnish 4+ ? -------------------------+----------------------- Reporter: g.gerritsen | Owner: lkarsten Type: defect | Status: closed Priority: low | Milestone: Component: build | Version: trunk Severity: trivial | Resolution: fixed Keywords: | -------------------------+----------------------- Changes (by lkarsten): * status: new => closed * resolution: => fixed Comment: You're right, of course. 
I've removed groff-base (debs) and groff (redhat) from the list of dependencies in master now. (commits: 1f198039f07b5057cfbb23eeedb72c1c5ddf6caa in varnish-cache- debian.git, 5f0194694d9cec5b7dfb045abe8b2e7df3e748c7 in varnish- cache.git.) Closing the ticket. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 15 13:37:33 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 15 Jul 2014 13:37:33 -0000 Subject: [Varnish] #1535: sphinx-build 0.6.6 incompatibilities (centos 6.5) In-Reply-To: <049.30f178036ed49f3e180450203778ca42@varnish-cache.org> References: <049.30f178036ed49f3e180450203778ca42@varnish-cache.org> Message-ID: <064.6562d0bac946dfaab5617c1b381e303c@varnish-cache.org> #1535: sphinx-build 0.6.6 incompatibilities (centos 6.5) -------------------------+------------------------ Reporter: g.gerritsen | Owner: lkarsten Type: defect | Status: Need info Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | -------------------------+------------------------ Changes (by lkarsten): * status: assigned => Need info Comment: Thanks for reporting this. I've removed -n from the makefile for now. When I first had a look at this, I started digging into the RST to get these includes working properly. I redid them on a computer with more recent sphinx-build on, a couple of months back. This works just fine on EL7, which has a newer sphinx-build. We define "supported platform" as a platform where the tarball release builds and runs. We don't necessarily support building from git on all platforms. EL5 has had this state for a few years now, automake there is too old. Which is why I'm leaning towards closing this as wontfix. Can you show me a use-case where building from tarball isn't good enough for EL6? 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 15 13:40:52 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 15 Jul 2014 13:40:52 -0000 Subject: [Varnish] #1535: sphinx-build 0.6.6 incompatibilities (centos 6.5) In-Reply-To: <049.30f178036ed49f3e180450203778ca42@varnish-cache.org> References: <049.30f178036ed49f3e180450203778ca42@varnish-cache.org> Message-ID: <064.69b34566c2a7b58cda7e724c6556a711@varnish-cache.org> #1535: sphinx-build 0.6.6 incompatibilities (centos 6.5) -------------------------+------------------------ Reporter: g.gerritsen | Owner: lkarsten Type: defect | Status: Need info Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | -------------------------+------------------------ Comment (by g.gerritsen): Wontfix sounds good, if someone really needs to build varnish on el6 they can always fix the Makefile -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 15 13:43:40 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 15 Jul 2014 13:43:40 -0000 Subject: [Varnish] #1521: Varnish 4 VCL compilation failed on x86_64 In-Reply-To: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> References: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> Message-ID: <061.0ceb859181c401ddf39678821b9753ae@varnish-cache.org> #1521: Varnish 4 VCL compilation failed on x86_64 ----------------------+------------------------ Reporter: yoloseem | Owner: Type: defect | Status: Need info Priority: normal | Milestone: Component: build | Version: 4.0.0 Severity: normal | Resolution: Keywords: | ----------------------+------------------------ Comment (by lkarsten): I had a look at the dtruss output, but I can't find anything conclusive in there. It forks, closes an endless amount of fds, then outputs the error message from clang. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 18 04:53:59 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 18 Jul 2014 04:53:59 -0000 Subject: [Varnish] #1548: 642 813 exams The next one again Message-ID: <049.f5a0ffaa02c03bfac8c10bc355caf275@varnish-cache.org> #1548: 642 813 exams The next one again -------------------------+-------------------- Reporter: manjammehta | Type: defect Status: new | Priority: normal Milestone: | Component: build Version: unknown | Severity: normal Keywords: 642-813 | -------------------------+-------------------- I Padres an what if I forgot again check it out I have to do is put question mark where the next component says hey man I want the IP address and inane in this format abed some the given an IP address 192168 not one to 0 that's not an IP address at the network address I'm going to go about 1.1 skinny is a simple classy Cinema Score if forget what comes next is my question mark says again I want the IP subnet mask no-win this format now for do another question mark I get see are which stands for carriage return meaning hit and her I didn't get in there so that means that set the IP address appropriately I always show my work show run again we'll go in and show me what?ve configured says right here too exactly what I typed in from global Multitask interface cereals /urn the night at IP address 192 168 11 and then [http://642-813dump.com/ 642-813] the summit mask 7 IP here let's go said an IP address on Pulaski to just clicked over to plaster till again maybe I forgot keep forgetting so I can?t dealers show run oh yeah its interface specify the interface in the IP address can let me go in said interface cereals /urn IP address 192 168 that 1.2 and her done want to see my work showrunning-config I decide to Tabitha completed for me and I can go and I can see okay and Padres set up so we should be overpaying at this point show interface cereals /urn shows us 
that is up line protocol is up layer winnower to a good and we set an IP address 192 168 not one not two sup unless I messed up typing the Eyed Peas in we should be able to ping pilaster one paying 1922 168 1.1 and I've got a response a hundred percent I now have a layer 1 where to and with the IP's upset layer 3collectivity let's take a look at some other commands that we can use to check out bare fiber connectivity as well alright so we've used the show running configure reviews show interface is a couple others shell I P interface only show me interfaces with IP setup on and I statistics meaning Internet Protocol shows me I subnet mask couple other things that we will use later outgoing access lists which will talk about a little bit later as well as all IP information this cane helpful for monitoring troubleshooting traffic but it's notional tell us you know it's not critical for lettings know that the interface there one there to land 3ra but it does show us that is up land protocols up the IP. 
http://642-813dump.com/ -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 18 05:57:38 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 18 Jul 2014 05:57:38 -0000 Subject: [Varnish] #1549: 642 813 exams Gets a little bit confusing Message-ID: <049.30d91ef630a27cc5e1e062fe762794ab@varnish-cache.org> #1549: 642 813 exams Gets a little bit confusing -------------------------+-------------------- Reporter: manjammehta | Type: defect Status: new | Priority: normal Milestone: | Component: build Version: unknown | Severity: normal Keywords: 642-813 | -------------------------+-------------------- The people are going to use to access your device as telnet network connectivity must be up there by default needs to be password said on so that's another issue you can even tell Matt in without putting a password and that's another way that people tonnage in an access your device so they can access it through the console auxiliary port or Telnet so those are three different methods people can use to access your router each one of those methods must have a password set if people have physical access your device doesn't matter if there's a password set because they can easily bypass that let's go in and take look at configuring the passwords on this device of somebody does gain access they're going to have to enter password get into any important modes and this is the more they're going to wanton get into I'm in privileged mode right now in the [http://642-813dump.com/ 642-813] router prop that are brought up I know I'm and privilege mode because there?s a pound sign here sup if I were to exit out right now it shows man connected through consoles 0 it's available I hit enter and I'm automatically in user mode now in use Armada can't make any configuration changes to the router but I can go in and view the configuration on the router when I need to do is set it up to where if some is even get to user mode they?ve got 
an hour a password sup an earned privilege mild next going to global mobile to confspace TCA man and now I need to go in and enter password outlying consuls Iraq has console as the 1 I'm connecting to there's only one starts with the number 0 so I type in line console 0 and hit enter now I have to configure password attack password Cisco so anyone tries enter this router and get abuse ammo they're going to have to remember that the password a Cisco when they plug into consuls era now I?d probably use a more secure password as Cisco is not too tough to figure out however let's take a look at that password well tested exit all the way out and for some reason I'm thinking I'm not going to get prompted for a password I hit Enter I did not get prompted for password my hunch was right the reason. http://642-813dump.com/ -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 18 09:15:29 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 18 Jul 2014 09:15:29 -0000 Subject: [Varnish] #1550: maximum url length for GET requests? Message-ID: <045.20b1d2549b2102f7461ec1b6a4be547f@varnish-cache.org> #1550: maximum url length for GET requests? -------------------------------------------------+------------------------- Reporter: rolafia | Type: defect Status: new | Priority: normal Milestone: | Component: varnishd Version: 3.0.2 | Severity: normal Keywords: url length, maximum length, | configuration options | -------------------------------------------------+------------------------- Hello, since a few days we get a lot of 503 errors which have been traced back to the url length. we have a lot of parameters which extends the url to a length of over 8200 characters. Varnish will throw the 503 if the length is 8201. Is there any possibility or configuration to extend the url length? I know that a url of 8200 characters is very long but we use the varnish in our backend and there we need longer URLs. 
We can change the request from GET to POST, but this will be a bigger change. So the best option for us would be a config to change that setting. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 18 09:16:15 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 18 Jul 2014 09:16:15 -0000 Subject: [Varnish] #1550: maximum url length for GET requests? In-Reply-To: <045.20b1d2549b2102f7461ec1b6a4be547f@varnish-cache.org> References: <045.20b1d2549b2102f7461ec1b6a4be547f@varnish-cache.org> Message-ID: <060.bf64473b59dcac0e1dc88efc21c2c005@varnish-cache.org> #1550: maximum url length for GET requests? -------------------------------------------------+------------------------- Reporter: rolafia | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: 3.0.2 Severity: normal | Resolution: Keywords: url length, maximum length, | configuration options | -------------------------------------------------+------------------------- Comment (by rolafia): Thanks for any advice. 
Best, Roland -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 18 10:32:00 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 18 Jul 2014 10:32:00 -0000 Subject: [Varnish] #1535: sphinx-build 0.6.6 incompatibilities (centos 6.5) In-Reply-To: <049.30f178036ed49f3e180450203778ca42@varnish-cache.org> References: <049.30f178036ed49f3e180450203778ca42@varnish-cache.org> Message-ID: <064.bda1bde77026036ab013ba3e3b368828@varnish-cache.org> #1535: sphinx-build 0.6.6 incompatibilities (centos 6.5) -------------------------+----------------------- Reporter: g.gerritsen | Owner: lkarsten Type: defect | Status: closed Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: wontfix Keywords: | -------------------------+----------------------- Changes (by lkarsten): * status: needinfo => closed * resolution: => wontfix -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 18 10:36:37 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 18 Jul 2014 10:36:37 -0000 Subject: [Varnish] #1550: maximum url length for GET requests? In-Reply-To: <045.20b1d2549b2102f7461ec1b6a4be547f@varnish-cache.org> References: <045.20b1d2549b2102f7461ec1b6a4be547f@varnish-cache.org> Message-ID: <060.cbc862f498313996f11128b5c51fef8f@varnish-cache.org> #1550: maximum url length for GET requests? -------------------------------------------------+------------------------- Reporter: rolafia | Owner: Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: 3.0.2 Severity: normal | Resolution: worksforme Keywords: url length, maximum length, | configuration options | -------------------------------------------------+------------------------- Changes (by lkarsten): * status: new => closed * resolution: => worksforme Comment: Please use the bugtracker only for real bugs, not support requests. 
(tip: look up the http_req_hdr_len and http_resp_hdr_len parameters) -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 18 10:41:30 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 18 Jul 2014 10:41:30 -0000 Subject: [Varnish] #1457: vcl.show should expand includes In-Reply-To: <043.f14786107b6d266ced5da4fa3a4b15af@varnish-cache.org> References: <043.f14786107b6d266ced5da4fa3a4b15af@varnish-cache.org> Message-ID: <058.b6e0b633cba4a280bfffdaba671cc4e5@varnish-cache.org> #1457: vcl.show should expand includes ----------------------+---------------------- Reporter: slink | Owner: daghf Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: unknown Severity: normal | Resolution: Keywords: | ----------------------+---------------------- Comment (by lkarsten): dag posted a preliminary patch to varnish-dev@ for this the other day; needs review. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 21 12:42:57 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 21 Jul 2014 12:42:57 -0000 Subject: [Varnish] #1512: Changes to bereq are lost between v_b_r and v_b_f In-Reply-To: <043.04a15d59ba73f8274523e0e9150e84a7@varnish-cache.org> References: <043.04a15d59ba73f8274523e0e9150e84a7@varnish-cache.org> Message-ID: <058.46dcbec3471e30694e585b03dd2adefa@varnish-cache.org> #1512: Changes to bereq are lost between v_b_r and v_b_f ----------------------+-------------------- Reporter: fgsch | Owner: phk Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: trunk Severity: normal | Resolution: Keywords: | ----------------------+-------------------- Changes (by slink): * cc: nils.goroll@? 
(added) * component: build => varnishd -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 22 08:28:13 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 22 Jul 2014 08:28:13 -0000 Subject: [Varnish] #1551: Panic message: Missing errorhandling code in HSH_Purge() Message-ID: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> #1551: Panic message: Missing errorhandling code in HSH_Purge() ---------------------+---------------------- Reporter: aduca | Type: defect Status: new | Priority: normal Milestone: | Component: varnishd Version: unknown | Severity: normal Keywords: | ---------------------+---------------------- Hi guys, We are using the Varnish version 3.0.5 and we have increased the size of the files that are cached from 10MB to 100MB. After this change once a day Varnish crashes with the following error message: Child (5730) Panic message: Missing errorhandling code in HSH_Purge(), cache_hash.c line 557:#012 Condition(spc >= sizeof *ocp) not true.thread = (cache-worker)#012ident = Linux,3.10.37-47.135.amzn1.x86_64,x86_64,-sfile,-smalloc,-hcritbit,epoll#012Backtrace:#012 0x42f308: /usr/sbin/varnishd() [0x42f308]#012 0x427eb8: /usr/sbin/varnishd(HSH_Purge+0x478) [0x427eb8]#012 0x7f42334f6af5: ./vcl.CMxtcaiB.so(VGC_function_vcl_hit+0x75) [0x7f42334f6af5]#012 0x436243: /usr/sbin/varnishd(VCL_hit_method+0x43) [0x436243]#012 0x415fd3: /usr/sbin/varnishd() [0x415fd3]#012 0x418da5: /usr/sbin/varnishd(CNT_Session+0x6a5) [0x418da5]#012 0x431051: /usr/sbin/varnishd() [0x431051]#012 0x7f423bb75f18: /lib64/libpthread.so.0(+0x7f18) [0x7f423bb75f18]#012 0x7f423b8abe0d: /lib64/libc.so.6(clone+0x6d) [0x7f423b8abe0d]#012sp = 0x7e441fa48008 {#012 fd = 70, id = 70, xid = 1910255048,#012 client = 10.xxx.xxx.xxx 47837,#012 step = STP_HIT,#012 handling = deliver,#012 restarts = 0, esi_level = 0#012 flags = #012 bodystatus = 5#012 ws = 0x7e441fa48080 { #012 id = "sess",#012 {s,f,r,e} = 
{0x7e441fa48c78,+232,(nil),+65536},#012 },#012 http[req] = {#012 ws = 0x7e441fa48080[sess]#012 "X-PURGE",#012 "/ValidationFunctions.js",#012 "HTTP/1.1",#012 "Host: xxxxxxxxxx ",#012 "queryparams: ",#012 "Vary: queryparams",#012 },#012 worker = 0x7e4549e20b30 {#012 ws = 0x7e4549e20d68 { #012 id = "wrk",#012 {s,f,r,e} = {0x7e4549e0eac0,+40,+65536,+65536},#012 },#012 },#012 vcl = {#012 srcname = {#012 "input",#012 "Default",#012 "backend.vcl",#012 "security.vcl",#012 },#012 },#012 obj = 0x7f41f3a87000 {#012 xid = 1884579434,#012 ws = 0x7f41f3a87018 { #012 id = "obj",#012 {s,f,r,e} = {0x7f41f3a87228,+408,(nil),+432},#012 },#012 http[obj] = {#012 ws = 0x7f41f3a87018[obj]#012 "HTTP/1.1",#012 "OK",#012 "Cache-Control: public,m I have disabled transparent hugepages as mentioned here - https://www .varnish-cache.org/docs/3.0/tutorial/platformnotes.html however this didn't fixed the issue. I'm monitoring all varnish stats so if you need any other information please let me know. Looking forward to hearing from you. 
Regards, Andrei -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 22 08:38:57 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 22 Jul 2014 08:38:57 -0000 Subject: [Varnish] #1551: Panic message: Missing errorhandling code in HSH_Purge() In-Reply-To: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> References: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> Message-ID: <058.b8c0fb429a82e5f922df96349c9e7336@varnish-cache.org> #1551: Panic message: Missing errorhandling code in HSH_Purge() ----------------------+---------------------- Reporter: aduca | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: unknown Severity: normal | Resolution: Keywords: | ----------------------+---------------------- Description changed by lkarsten: Old description: > Hi guys, > > We are using the Varnish version 3.0.5 and we have increased the size of > the files that are cached from 10MB to 100MB. 
After this change once a > day Varnish crashes with the following error message: > > Child (5730) Panic message: Missing errorhandling code in HSH_Purge(), > cache_hash.c line 557:#012 Condition(spc >= sizeof *ocp) not true.thread > = (cache-worker)#012ident = > Linux,3.10.37-47.135.amzn1.x86_64,x86_64,-sfile,-smalloc,-hcritbit,epoll#012Backtrace:#012 > 0x42f308: /usr/sbin/varnishd() [0x42f308]#012 0x427eb8: > /usr/sbin/varnishd(HSH_Purge+0x478) [0x427eb8]#012 0x7f42334f6af5: > ./vcl.CMxtcaiB.so(VGC_function_vcl_hit+0x75) [0x7f42334f6af5]#012 > 0x436243: /usr/sbin/varnishd(VCL_hit_method+0x43) [0x436243]#012 > 0x415fd3: /usr/sbin/varnishd() [0x415fd3]#012 0x418da5: > /usr/sbin/varnishd(CNT_Session+0x6a5) [0x418da5]#012 0x431051: > /usr/sbin/varnishd() [0x431051]#012 0x7f423bb75f18: > /lib64/libpthread.so.0(+0x7f18) [0x7f423bb75f18]#012 0x7f423b8abe0d: > /lib64/libc.so.6(clone+0x6d) [0x7f423b8abe0d]#012sp = 0x7e441fa48008 > {#012 fd = 70, id = 70, xid = 1910255048,#012 client = 10.xxx.xxx.xxx > 47837,#012 step = STP_HIT,#012 handling = deliver,#012 restarts = 0, > esi_level = 0#012 flags = #012 bodystatus = 5#012 ws = 0x7e441fa48080 > { #012 id = "sess",#012 {s,f,r,e} = > {0x7e441fa48c78,+232,(nil),+65536},#012 },#012 http[req] = {#012 ws > = 0x7e441fa48080[sess]#012 "X-PURGE",#012 > "/ValidationFunctions.js",#012 "HTTP/1.1",#012 "Host: > xxxxxxxxxx ",#012 "queryparams: ",#012 "Vary: queryparams",#012 > },#012 worker = 0x7e4549e20b30 {#012 ws = 0x7e4549e20d68 { #012 > id = "wrk",#012 {s,f,r,e} = {0x7e4549e0eac0,+40,+65536,+65536},#012 > },#012 },#012 vcl = {#012 srcname = {#012 "input",#012 > "Default",#012 "backend.vcl",#012 "security.vcl",#012 > },#012 },#012 obj = 0x7f41f3a87000 {#012 xid = 1884579434,#012 > ws = 0x7f41f3a87018 { #012 id = "obj",#012 {s,f,r,e} = > {0x7f41f3a87228,+408,(nil),+432},#012 },#012 http[obj] = {#012 > ws = 0x7f41f3a87018[obj]#012 "HTTP/1.1",#012 "OK",#012 > "Cache-Control: public,m > > I have disabled transparent hugepages as mentioned 
here - https://www > .varnish-cache.org/docs/3.0/tutorial/platformnotes.html however this > didn't fixed the issue. > I'm monitoring all varnish stats so if you need any other information > please let me know. > > Looking forward to hearing from you. > > Regards, > Andrei New description: Hi guys, We are using the Varnish version 3.0.5 and we have increased the size of the files that are cached from 10MB to 100MB. After this change once a day Varnish crashes with the following error message: {{{ Child (5730) Panic message: Missing errorhandling code in HSH_Purge(), cache_hash.c line 557: Condition(spc >= sizeof *ocp) not true.thread = (cache-worker) ident = Linux,3.10.37-47.135.amzn1.x86_64,x86_64,-sfile,-smalloc,-hcritbit,epoll Backtrace: 0x42f308: /usr/sbin/varnishd() [0x42f308] 0x427eb8: /usr/sbin/varnishd(HSH_Purge+0x478) [0x427eb8] 0x7f42334f6af5: ./vcl.CMxtcaiB.so(VGC_function_vcl_hit+0x75) [0x7f42334f6af5] 0x436243: /usr/sbin/varnishd(VCL_hit_method+0x43) [0x436243] 0x415fd3: /usr/sbin/varnishd() [0x415fd3] 0x418da5: /usr/sbin/varnishd(CNT_Session+0x6a5) [0x418da5] 0x431051: /usr/sbin/varnishd() [0x431051] 0x7f423bb75f18: /lib64/libpthread.so.0(+0x7f18) [0x7f423bb75f18] 0x7f423b8abe0d: /lib64/libc.so.6(clone+0x6d) [0x7f423b8abe0d] sp = 0x7e441fa48008 { fd = 70, id = 70, xid = 1910255048, client = 10.xxx.xxx.xxx 47837, step = STP_HIT, handling = deliver, restarts = 0, esi_level = 0 flags = bodystatus = 5 ws = 0x7e441fa48080 { id = "sess", {s,f,r,e} = {0x7e441fa48c78,+232,(nil),+65536}, }, http[req] = { ws = 0x7e441fa48080[sess] "X-PURGE", "/ValidationFunctions.js", "HTTP/1.1", "Host: xxxxxxxxxx ", "queryparams: ", "Vary: queryparams", }, worker = 0x7e4549e20b30 { ws = 0x7e4549e20d68 { id = "wrk", {s,f,r,e} = {0x7e4549e0eac0,+40,+65536,+65536}, }, }, vcl = { srcname = { "input", "Default", "backend.vcl", "security.vcl", }, }, obj = 0x7f41f3a87000 { xid = 1884579434, ws = 0x7f41f3a87018 { id = "obj", {s,f,r,e} = {0x7f41f3a87228,+408,(nil),+432}, }, http[obj] = 
{ ws = 0x7f41f3a87018[obj] "HTTP/1.1", "OK", "Cache-Control: public,m }}} I have disabled transparent hugepages as mentioned here - https://www .varnish-cache.org/docs/3.0/tutorial/platformnotes.html however this didn't fixed the issue. I'm monitoring all varnish stats so if you need any other information please let me know. Looking forward to hearing from you. Regards, Andrei -- -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 22 08:56:18 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 22 Jul 2014 08:56:18 -0000 Subject: [Varnish] #1551: Panic message: Missing errorhandling code in HSH_Purge() In-Reply-To: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> References: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> Message-ID: <058.8587ad1a87134db30738494601775114@varnish-cache.org> #1551: Panic message: Missing errorhandling code in HSH_Purge() ----------------------+---------------------- Reporter: aduca | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: unknown Severity: normal | Resolution: Keywords: | ----------------------+---------------------- Comment (by lkarsten): I suspect this is a sess_workspace overrun. Can you please increase the sess_workspace parameter and see if this goes away? 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 22 09:05:24 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 22 Jul 2014 09:05:24 -0000 Subject: [Varnish] #1521: Varnish 4 VCL compilation failed on OSX x86_64 (was: Varnish 4 VCL compilation failed on x86_64) In-Reply-To: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> References: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> Message-ID: <061.6f2614e8a3ec5c311868f58d7f5a1db3@varnish-cache.org> #1521: Varnish 4 VCL compilation failed on OSX x86_64 ----------------------+----------------------- Reporter: yoloseem | Owner: Type: defect | Status: needinfo Priority: normal | Milestone: Component: build | Version: 4.0.0 Severity: normal | Resolution: Keywords: | ----------------------+----------------------- -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 22 09:07:43 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 22 Jul 2014 09:07:43 -0000 Subject: [Varnish] #1402: Failed dependencies: libvarnishapi.so.1 In-Reply-To: <046.8f382fc03c0717150d85139dbc82a371@varnish-cache.org> References: <046.8f382fc03c0717150d85139dbc82a371@varnish-cache.org> Message-ID: <061.27a0fff0401299f4727cfde43f00dbdb@varnish-cache.org> #1402: Failed dependencies: libvarnishapi.so.1 -----------------------+------------------------------ Reporter: dmalinow | Owner: lkarsten Type: defect | Status: closed Priority: normal | Milestone: Varnish 4.0-TP1 Component: packaging | Version: trunk Severity: major | Resolution: fixed Keywords: | -----------------------+------------------------------ Changes (by lkarsten): * status: new => closed * resolution: => fixed Comment: This was fixed with 4.0.1. Closing. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 22 09:31:38 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 22 Jul 2014 09:31:38 -0000 Subject: [Varnish] #1503: return(restart) seems to be changed into return(retry) In-Reply-To: <041.cf2573d353ec972ade6717eda53bca06@varnish-cache.org> References: <041.cf2573d353ec972ade6717eda53bca06@varnish-cache.org> Message-ID: <056.e8676bddfeae07c7f6ce0bde70232dac@varnish-cache.org> #1503: return(restart) seems to be changed into return(retry) ---------------------------+--------------------- Reporter: Tin | Owner: scoof Type: defect | Status: closed Priority: normal | Milestone: Component: documentation | Version: 4.0.0 Severity: normal | Resolution: fixed Keywords: return(retry) | ---------------------------+--------------------- Changes (by Lasse Karstensen ): * status: new => closed * resolution: => fixed Comment: In [5e5d25470863490365c791f13233bb546daab4c6]: {{{ #!CommitTicketReference repository="" revision="5e5d25470863490365c791f13233bb546daab4c6" Document the new return(retry). Backend restarts are now return(retry). 
Fixes: #1503 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 04:45:50 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 04:45:50 -0000 Subject: [Varnish] #1521: Varnish 4 VCL compilation failed on OSX x86_64 In-Reply-To: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> References: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> Message-ID: <061.ef00b33e4ebedfc2cecbbcd9c2de85cc@varnish-cache.org> #1521: Varnish 4 VCL compilation failed on OSX x86_64 ----------------------+----------------------- Reporter: yoloseem | Owner: Type: defect | Status: needinfo Priority: normal | Milestone: Component: build | Version: 4.0.0 Severity: normal | Resolution: Keywords: | ----------------------+----------------------- Comment (by Corey): I believe I am running into a similar issue where the privileges are dropped, I verified that this issue does not exist in 3.x. Our environment is CentOS 6.5 x86_64. The permissions on gcc are restricted on our system, because of this the VCLs will not compile. If I chmod \ re-own gcc the issue goes away. -rwxr-x--- 2 root compiler 263952 Nov 21 2013 /usr/bin/gcc* <-- does not work -rwxr-xr-x 2 root compiler 263952 Nov 21 2013 /usr/bin/gcc* <-- works I'm attaching an strace including a separate one for each child. Let me know if further information is required. Thanks. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 09:35:53 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 09:35:53 -0000 Subject: [Varnish] #1552: Panic Message - current master cache_session.c Message-ID: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> #1552: Panic Message - current master cache_session.c ------------------------+-------------------- Reporter: hjanuschka | Type: defect Status: new | Priority: normal Milestone: | Component: build Version: trunk | Severity: normal Keywords: | ------------------------+-------------------- after upgrade to current master: ac4e79e5ec3d037bfe0c009dd53d266368d1108d varnish keeps on crashing - syslog messages show following Panic message:#012Assert error in SES_ReleaseReq(), cache/cache_session.c line 401:#012 Condition((req->vsl->wid) == 0) not true.#012thread = (cache-worker)#012ident = Linux,3.2.0-4-amd64,x86_64,-smalloc,-smalloc,-hcritbit,epoll#012Backtrace:#012 0x434c45: pan_ic+0xc5#012 0x43e648: SES_ReleaseReq+0x208#012 0x42e663: HTTP1_Session+0x663#012 0x43d188: ses_req_pool_task+0x68#012 0x43e1f9: SES_pool_accept_task+0x299#012 0x437b00: Pool_Work_Thread+0x370#012 0x44a3be: wrk_thread_real+0xae#012 0x7ffe9fceeb50: /lib/x86_64-linux- gnu/libpthread.so.0(+0x6b50) [0x7ffe9fceeb50]#012 0x7ffe9fa390ed: /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d) [0x7ffe9fa390ed]#012req = 0xda2500 {#012 sp = 0x7ffe90021410, vxid = 262152, step = R_STP_RESTART,#012 req_body = R_BODY_INIT,#012 restarts = 0, esi_level = 0#012 sp = 0x7ffe90021410 {#012 fd = 13, vxid = 262151,#012 client = 46.75.92.174 50048,#012 step = S_STP_NEWREQ,#012 },#012 ws = 0xda2690 {#012 id = "req",#012 {s,f,r,e} = {0xda44e8,0xda44e8,+32768,+57368},#012 },#012 http[req] = {#012 ws = (nil)[]#012 },#012},#012 before the upgrade i stopped varnish - and remove the vsm file. 
calling varnishlog often returned: "Log abandoned Log reacquired" varnishadm sometimes returned: "root at loki /opt/varnish/bin $ ./varnishadm Cannot open /opt/varnish/var/varnish/loki/_.vsm: No such file or directory " system: debian 7 - amd64 - all updates installed after downgrading back to my last-known-good commit - id everthing works -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 12:25:44 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 12:25:44 -0000 Subject: [Varnish] #1552: Panic Message - current master cache_session.c In-Reply-To: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> References: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> Message-ID: <063.e7b0e41dd1f5a48f1970e6c6340f6ea9@varnish-cache.org> #1552: Panic Message - current master cache_session.c ------------------------+-------------------- Reporter: hjanuschka | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | ------------------------+-------------------- Description changed by lkarsten: Old description: > after upgrade to current master: ac4e79e5ec3d037bfe0c009dd53d266368d1108d > > varnish keeps on crashing - syslog messages show following > > Panic message:#012Assert error in SES_ReleaseReq(), cache/cache_session.c > line 401:#012 Condition((req->vsl->wid) == 0) not true.#012thread = > (cache-worker)#012ident = > Linux,3.2.0-4-amd64,x86_64,-smalloc,-smalloc,-hcritbit,epoll#012Backtrace:#012 > 0x434c45: pan_ic+0xc5#012 0x43e648: SES_ReleaseReq+0x208#012 0x42e663: > HTTP1_Session+0x663#012 0x43d188: ses_req_pool_task+0x68#012 0x43e1f9: > SES_pool_accept_task+0x299#012 0x437b00: Pool_Work_Thread+0x370#012 > 0x44a3be: wrk_thread_real+0xae#012 0x7ffe9fceeb50: /lib/x86_64-linux- > gnu/libpthread.so.0(+0x6b50) [0x7ffe9fceeb50]#012 0x7ffe9fa390ed: > /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d) 
[0x7ffe9fa390ed]#012req = > 0xda2500 {#012 sp = 0x7ffe90021410, vxid = 262152, step = > R_STP_RESTART,#012 req_body = R_BODY_INIT,#012 restarts = 0, esi_level > = 0#012 sp = 0x7ffe90021410 {#012 fd = 13, vxid = 262151,#012 > client = 46.75.92.174 50048,#012 step = S_STP_NEWREQ,#012 },#012 ws > = 0xda2690 {#012 id = "req",#012 {s,f,r,e} = > {0xda44e8,0xda44e8,+32768,+57368},#012 },#012 http[req] = {#012 ws = > (nil)[]#012 },#012},#012 > > before the upgrade i stopped varnish - and remove the vsm file. > calling varnishlog often returned: > "Log abandoned > Log reacquired" > > varnishadm sometimes returned: > "root at loki /opt/varnish/bin $ ./varnishadm > Cannot open /opt/varnish/var/varnish/loki/_.vsm: No such file or > directory > " > > > system: debian 7 - amd64 - all updates installed > > after downgrading back to my last-known-good commit - id everthing works New description: after upgrade to current master: ac4e79e5ec3d037bfe0c009dd53d266368d1108d varnish keeps on crashing - syslog messages show following {{{ Panic message: Assert error in SES_ReleaseReq(), cache/cache_session.c line 401: Condition((req->vsl->wid) == 0) not true. 
thread = (cache-worker) ident = Linux,3.2.0-4-amd64,x86_64,-smalloc,-smalloc,-hcritbit,epoll Backtrace: 0x434c45: pan_ic+0xc5 0x43e648: SES_ReleaseReq+0x208 0x42e663: HTTP1_Session+0x663 0x43d188: ses_req_pool_task+0x68 0x43e1f9: SES_pool_accept_task+0x299 0x437b00: Pool_Work_Thread+0x370 0x44a3be: wrk_thread_real+0xae 0x7ffe9fceeb50: /lib/x86_64-linux-gnu/libpthread.so.0(+0x6b50) [0x7ffe9fceeb50] 0x7ffe9fa390ed: /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d) [0x7ffe9fa390ed] req = 0xda2500 { sp = 0x7ffe90021410, vxid = 262152, step = R_STP_RESTART, req_body = R_BODY_INIT, restarts = 0, esi_level = 0 sp = 0x7ffe90021410 { fd = 13, vxid = 262151, client = 46.75.92.174 50048, step = S_STP_NEWREQ, }, ws = 0xda2690 { id = "req", {s,f,r,e} = {0xda44e8,0xda44e8,+32768,+57368}, }, http[req] = { ws = (nil)[] }, }, }}} before the upgrade i stopped varnish - and remove the vsm file. calling varnishlog often returned: "Log abandoned Log reacquired" varnishadm sometimes returned: "root at loki /opt/varnish/bin $ ./varnishadm Cannot open /opt/varnish/var/varnish/loki/_.vsm: No such file or directory " system: debian 7 - amd64 - all updates installed after downgrading back to my last-known-good commit - id everthing works -- -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 13:37:04 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 13:37:04 -0000 Subject: [Varnish] #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 Message-ID: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 ----------------------------+---------------------- Reporter: esfourteen | Type: defect Status: new | Priority: normal Milestone: | Component: varnishd Version: 4.0.1 | Severity: normal Keywords: panic vry_prep | ----------------------------+---------------------- I'm using the vsthrottle vmod written by daghf (https://github.com/daghf 
/libvmod-vsthrottle), im using this in vcl_miss to stop people from hitting my backend servers when they request too often: {{{ sub vcl_miss { if (req.url ~ "track_history/channel") { # per client check if(vsthrottle.is_denied("api:track_history:channel:ip:" + client.ip, 4, 10s)) { return (synth(429)); } # global check if(vsthrottle.is_denied("api:track_history:channel", 75, 3s)) { return (synth(429)); } } } }}} This will randomly cause a panic in the child, attached here: https://gist.github.com/esfourteen/88de06e0ce2335061c19 This seems to be suggesting to do some Vary checks for caching, which is confusing because: * I don't send a Vary header from my backend * this is hitting vcl_miss which should indicate its determined there is no viable cached response * the panic shows the response already set, but the step indicates its doing a lookup? If I move the vsthrottle check to vcl_backend_fetch and return (abort) instead of synth, it never panics: {{{ sub vcl_backend_fetch { if (bereq.url ~ "track_history/channel") { if(vsthrottle.is_denied("api:track_history:channel:ip:" + client.ip, 4, 10s)) { return (abandon); } if(vsthrottle.is_denied("api:track_history:channel", 75, 3s)) { return (abandon); } } } }}} So while the above does work, my clients are receiving a 503 error instead of the 429 which is what I need ideally. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 13:39:31 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 13:39:31 -0000 Subject: [Varnish] #1552: Panic Message - current master cache_session.c In-Reply-To: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> References: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> Message-ID: <063.1d72f7d1131914aaccf313a6338331a4@varnish-cache.org> #1552: Panic Message - current master cache_session.c ------------------------+-------------------- Reporter: hjanuschka | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | ------------------------+-------------------- Comment (by lkarsten): r01284.vtc is tripped by this on the jenkins build server: {{{ * top 1.9 RESETTING after ../../../bin/varnishtest/tests/r01284.vtc ** s1 1.9 Waiting for server **** s1 1.9 macro undef s1_addr **** s1 1.9 macro undef s1_port **** s1 1.9 macro undef s1_sock *** v1 1.9 debug| Child (17382) died signal=6\n *** v1 1.9 debug| Child (17382) Panic message:\n *** v1 1.9 debug| Assert error in SES_ReleaseReq(), ../../../bin/varnishd/cache/cache_session.c line 401:\n *** v1 1.9 debug| Condition((req->vsl->wid) == 0) not true.\n *** v1 1.9 debug| errno = 104 (Connection reset by peer)\n *** v1 1.9 debug| thread = (cache-worker)\n *** v1 1.9 debug| ident = Linux,3.2.0-65-virtual,x86_64,-smalloc,-hcritbit,epoll\n *** v1 1.9 debug| Backtrace:\n *** v1 1.9 debug| 0x43ce4f: pan_backtrace+0x19\n *** v1 1.9 debug| 0x43d160: pan_ic+0x1e9\n *** v1 1.9 debug| 0x449222: SES_ReleaseReq+0x1e7\n *** v1 1.9 debug| 0x43382e: http1_wait+0x6b2\n *** v1 1.9 debug| 0x434ce3: HTTP1_Session+0x511\n *** v1 1.9 debug| 0x447b83: ses_req_pool_task+0x166\n *** v1 1.9 debug| 0x447e4e: ses_sess_pool_task+0x23b\n *** v1 1.9 debug| 0x4400e6: Pool_Work_Thread+0x4cb\n *** v1 1.9 debug| 0x4583e2: 
wrk_thread_real+0x204\n *** v1 1.9 debug| 0x45853a: WRK_thread+0x27\n *** v1 1.9 debug| req = 0x2aafb409f020 {\n *** v1 1.9 debug| sp = 0x2aafb440f1a0, vxid = 1006, step = R_STP_RESTART,\n *** v1 1.9 debug| req_body = R_BODY_INIT,\n *** v1 1.9 debug| restarts = 0, esi_level = 0\n *** v1 1.9 debug| sp = 0x2aafb440f1a0 {\n *** v1 1.9 debug| fd = 12, vxid = 1003,\n *** v1 1.9 debug| client = 127.0.0.1 34339,\n *** v1 1.9 debug| step = S_STP_NEWREQ,\n *** v1 1.9 debug| },\n *** v1 1.9 debug| ws = 0x2aafb409f1b0 {\n *** v1 1.9 debug| id = "req",\n *** v1 1.9 debug| {s,f,r,e} = {0x2aafb40a1008,0x2aafb40a1008,(nil),+57368},\n *** v1 1.9 debug| },\n *** v1 1.9 debug| http[req] = {\n *** v1 1.9 debug| ws = (nil)[]\n *** v1 1.9 debug| },\n *** v1 1.9 debug| },\n *** v1 1.9 debug| \n *** v1 1.9 debug| \n *** v1 1.9 debug| Child cleanup complete\n }}} Source: https://jenkins.varnish-software.com/view/varnish-master/job /varnish-master-src/2174/console This does not happen on my debian jessie workstation. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 14:09:25 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 14:09:25 -0000 Subject: [Varnish] #1552: Panic Message - current master cache_session.c In-Reply-To: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> References: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> Message-ID: <063.0b59282723b3a8e5f961b617e43c2f10@varnish-cache.org> #1552: Panic Message - current master cache_session.c ------------------------+----------------------- Reporter: hjanuschka | Owner: Type: defect | Status: needinfo Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | ------------------------+----------------------- Changes (by phk): * status: new => needinfo Comment: This should be fixed in 2b9d23d586777d3f38df45d54ba6a5614ac4cce5 Please report back. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 14:20:38 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 14:20:38 -0000 Subject: [Varnish] #1552: Panic Message - current master cache_session.c In-Reply-To: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> References: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> Message-ID: <063.8016233e0661fbcf59a86e646b58d499@varnish-cache.org> #1552: Panic Message - current master cache_session.c ------------------------+----------------------- Reporter: hjanuschka | Owner: Type: defect | Status: needinfo Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | ------------------------+----------------------- Comment (by hjanuschka): ok with 2b9d23d it seems to be fixed - will have an eye on it - in that short window only a few GB's went through - but without that patch - the scenario happend a few seconds after launch. the varnishlog "abandond" stuff is gone also -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 14:33:28 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 14:33:28 -0000 Subject: [Varnish] #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 In-Reply-To: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> References: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> Message-ID: <063.f8ba08f5d63f5534e86a18f84ee1328b@varnish-cache.org> #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 ----------------------------+-------------------- Reporter: esfourteen | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: Keywords: panic vry_prep | ----------------------------+-------------------- Comment (by lkarsten): Can you please provide the VCL in use? 
This looks like something corrupts a bit of memory, and when we reuse it for the next request it fails the internal consistency checks. Do you get multiple panics? Are they always similar to this one? -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 15:21:35 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 15:21:35 -0000 Subject: [Varnish] #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 In-Reply-To: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> References: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> Message-ID: <063.e0b46ec0bf7b2eaa7b58e69f723d008f@varnish-cache.org> #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 ----------------------------+-------------------- Reporter: esfourteen | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: Keywords: panic vry_prep | ----------------------------+-------------------- Comment (by esfourteen): Full VCL is attached but I removed my backend server hosts. When the panic happens the child restarts and stays running for some time, most uptime I saw from a child was about 30 minutes before it restarted. The panic is always the same and the request identical. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 23 17:16:42 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 23 Jul 2014 17:16:42 -0000 Subject: [Varnish] #1552: Panic Message - current master cache_session.c In-Reply-To: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> References: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> Message-ID: <063.94b0b7b495b3c578a13ce1c0b0dfbe74@varnish-cache.org> #1552: Panic Message - current master cache_session.c ------------------------+----------------------- Reporter: hjanuschka | Owner: Type: defect | Status: needinfo Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | ------------------------+----------------------- Comment (by hjanuschka): issues seems to be gone - done a few TB's, thanks for the fast fix!! -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 07:11:41 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 07:11:41 -0000 Subject: [Varnish] #1554: Panic Message - current master V1F_Setup_Fetch(), cache/cache_http1_fetch.c line 180 Message-ID: <048.e42c479ec50419283469017e67620cc5@varnish-cache.org> #1554: Panic Message - current master V1F_Setup_Fetch(), cache/cache_http1_fetch.c line 180 ------------------------+-------------------- Reporter: hjanuschka | Type: defect Status: new | Priority: normal Milestone: | Component: build Version: trunk | Severity: normal Keywords: | ------------------------+-------------------- i am on 2b9d23d every once in a while i get following panic's in logfile. 
{{{ Panic message:#012Assert error in V1F_Setup_Fetch(), cache/cache_http1_fetch.c line 180:#012 Condition(bo->content_length == -1) not true.#012thread = (cache-worker)#012ident = Linux,3.2.0-4-amd64,x86_64,-smalloc,-smalloc,-hcritbit,epoll#012Backtrace:#012 0x434c45: pan_ic+0xc5#012 0x42cd5e: V1F_Setup_Fetch+0x23e#012 0x421c34: vbf_fetch_thread+0x1544#012 0x437b00: Pool_Work_Thread+0x370#012 0x44a3ae: wrk_thread_real+0xae#012 0x7f919d528b50: /lib/x86_64-linux- gnu/libpthread.so.0(+0x6b50) [0x7f919d528b50]#012 0x7f919d27320d: /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d) [0x7f919d27320d]#012 busyobj = 0x6fcd890 {#012 ws = 0x6fcd948 {#012 id = "bo",#012 {s,f,r,e} = {0x6fcf840,+1808,(nil),+57424},#012 },#012 refcnt = 2#012 retries = 1#012 failed = 0#012 state = 1#012 is_do_stream#012 is_is_gunzip#012 bodystatus = 2 (chunked),#012 },#012 http[bereq] = {#012 ws = 0x6fcd948[bo]#012 "GET",#012 "/krone/kmcnt/oewa_info.hbs?output_type=json&domain=tablet.krone.at&url=/28/Startseite",#012 "HTTP/1.1",#012 "X-Akamai-CONFIG-LOG-DETAIL: true",#012 "Akamai-Origin-Hop: 1",#012 "Via: 1.0 akamai.net(ghost) (AkamaiGHost)",#012 "True-Client-IP: 213.164.6.215",#012 "Host: www.krone.at",#012 "Pragma: no-cache",#012 "Accept- Encoding: gzip",#012 "X-Forwarded-For: 213.164.6.215, 23.14.92.78, 23.14.92.78",#012 "X-passthrough-headers: no",#012 "X -Varnish-TTL: 0",#012 "X-Varnish-Cache: MISS",#012 "X-Varnish: 6520952",#012 },#012 http[beresp] = {#012 ws = 0x6fcd948[bo]#012 "HTTP/1.1",#012 "200",#012 "OK",#012 "Date: Thu, 24 Jul 2014 04:58:23 GMT",#012 "Server: Apache",#012 "Content-Type: application/x-javascript",#012 "Cache-Control: max-age=2592000",#012 "Expires: Sat, 23 Aug 2014 04:58:23 GMT",#012 "Transfer-Encoding: chunked",#012 "X -Varnish-BE: hps_director",#012 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 09:30:08 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 09:30:08 -0000 Subject: [Varnish] 
#1552: Panic Message - current master cache_session.c In-Reply-To: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> References: <048.a7531cbfbed6e9ef08bb1911e07efbd6@varnish-cache.org> Message-ID: <063.c3269a0922a84d273920022a1776060e@varnish-cache.org> #1552: Panic Message - current master cache_session.c ------------------------+--------------------- Reporter: hjanuschka | Owner: Type: defect | Status: closed Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: fixed Keywords: | ------------------------+--------------------- Changes (by phk): * status: needinfo => closed * resolution: => fixed -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 11:26:30 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 11:26:30 -0000 Subject: [Varnish] #1554: Panic Message - current master V1F_Setup_Fetch(), cache/cache_http1_fetch.c line 180 In-Reply-To: <048.e42c479ec50419283469017e67620cc5@varnish-cache.org> References: <048.e42c479ec50419283469017e67620cc5@varnish-cache.org> Message-ID: <063.c50c82286dd87520f5e88393d97f0145@varnish-cache.org> #1554: Panic Message - current master V1F_Setup_Fetch(), cache/cache_http1_fetch.c line 180 ------------------------+-------------------- Reporter: hjanuschka | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: build | Version: trunk Severity: normal | Resolution: Keywords: | ------------------------+-------------------- Description changed by lkarsten: Old description: > i am on 2b9d23d > > every once in a while i get following panic's in logfile. 
> > {{{ > Panic message:#012Assert error in V1F_Setup_Fetch(), > cache/cache_http1_fetch.c line 180:#012 Condition(bo->content_length == > -1) not true.#012thread = (cache-worker)#012ident = > Linux,3.2.0-4-amd64,x86_64,-smalloc,-smalloc,-hcritbit,epoll#012Backtrace:#012 > 0x434c45: pan_ic+0xc5#012 0x42cd5e: V1F_Setup_Fetch+0x23e#012 0x421c34: > vbf_fetch_thread+0x1544#012 0x437b00: Pool_Work_Thread+0x370#012 > 0x44a3ae: wrk_thread_real+0xae#012 0x7f919d528b50: /lib/x86_64-linux- > gnu/libpthread.so.0(+0x6b50) [0x7f919d528b50]#012 0x7f919d27320d: > /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d) [0x7f919d27320d]#012 busyobj > = 0x6fcd890 {#012 ws = 0x6fcd948 {#012 id = "bo",#012 > {s,f,r,e} = {0x6fcf840,+1808,(nil),+57424},#012 },#012 refcnt = 2#012 > retries = 1#012 failed = 0#012 state = 1#012 is_do_stream#012 > is_is_gunzip#012 bodystatus = 2 (chunked),#012 },#012 > http[bereq] = {#012 ws = 0x6fcd948[bo]#012 "GET",#012 > "/krone/kmcnt/oewa_info.hbs?output_type=json&domain=tablet.krone.at&url=/28/Startseite",#012 > "HTTP/1.1",#012 "X-Akamai-CONFIG-LOG-DETAIL: true",#012 > "Akamai-Origin-Hop: 1",#012 "Via: 1.0 akamai.net(ghost) > (AkamaiGHost)",#012 "True-Client-IP: 213.164.6.215",#012 > "Host: www.krone.at",#012 "Pragma: no-cache",#012 "Accept- > Encoding: gzip",#012 "X-Forwarded-For: 213.164.6.215, 23.14.92.78, > 23.14.92.78",#012 "X-passthrough-headers: no",#012 "X > -Varnish-TTL: 0",#012 "X-Varnish-Cache: MISS",#012 > "X-Varnish: 6520952",#012 },#012 http[beresp] = {#012 ws = > 0x6fcd948[bo]#012 "HTTP/1.1",#012 "200",#012 > "OK",#012 "Date: Thu, 24 Jul 2014 04:58:23 GMT",#012 > "Server: Apache",#012 "Content-Type: > application/x-javascript",#012 "Cache-Control: max- > age=2592000",#012 "Expires: Sat, 23 Aug 2014 04:58:23 GMT",#012 > "Transfer-Encoding: chunked",#012 "X-Varnish-BE: > hps_director",#012 > }}} New description: i am on 2b9d23d every once in a while i get following panic's in logfile. 
{{{ Panic message: Assert error in V1F_Setup_Fetch(), cache/cache_http1_fetch.c line 180: Condition(bo->content_length == -1) not true. thread = (cache-worker) ident = Linux,3.2.0-4-amd64,x86_64,-smalloc,-smalloc,-hcritbit,epoll Backtrace: 0x434c45: pan_ic+0xc5 0x42cd5e: V1F_Setup_Fetch+0x23e 0x421c34: vbf_fetch_thread+0x1544 0x437b00: Pool_Work_Thread+0x370 0x44a3ae: wrk_thread_real+0xae 0x7f919d528b50: /lib/x86_64-linux-gnu/libpthread.so.0(+0x6b50) [0x7f919d528b50] 0x7f919d27320d: /lib/x86_64-linux-gnu/libc.so.6(clone+0x6d) [0x7f919d27320d] busyobj = 0x6fcd890 { ws = 0x6fcd948 { id = "bo", {s,f,r,e} = {0x6fcf840,+1808,(nil),+57424}, }, refcnt = 2 retries = 1 failed = 0 state = 1 is_do_stream is_is_gunzip bodystatus = 2 (chunked), }, http[bereq] = { ws = 0x6fcd948[bo] "GET", "/krone/kmcnt/oewa_info.hbs?output_type=json&domain=tablet.krone.at&url=/28/Startseite", "HTTP/1.1", "X-Akamai-CONFIG-LOG-DETAIL: true", "Akamai-Origin-Hop: 1", "Via: 1.0 akamai.net(ghost) (AkamaiGHost)", "True-Client-IP: 213.164.6.215", "Host: www.krone.at", "Pragma: no-cache", "Accept-Encoding: gzip", "X-Forwarded-For: 213.164.6.215, 23.14.92.78, 23.14.92.78", "X-passthrough-headers: no", "X-Varnish-TTL: 0", "X-Varnish-Cache: MISS", "X-Varnish: 6520952", }, http[beresp] = { ws = 0x6fcd948[bo] "HTTP/1.1", "200", "OK", "Date: Thu, 24 Jul 2014 04:58:23 GMT", "Server: Apache", "Content-Type: application/x-javascript", "Cache-Control: max-age=2592000", "Expires: Sat, 23 Aug 2014 04:58:23 GMT", "Transfer-Encoding: chunked", "X-Varnish-BE: hps_director", }}} -- -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 11:53:21 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 11:53:21 -0000 Subject: [Varnish] #1555: configure not checking for readline.h, giving make error after fixing it. 
Message-ID: <041.90d39daf5df6b4940eecfab0845b4b93@varnish-cache.org> #1555: configure not checking for readline.h, giving make error after fixing it. -------------------------------------------------+------------------------- Reporter: Tin | Type: defect Status: new | Priority: normal Milestone: | Component: build Version: 4.0.1 | Severity: normal Keywords: readline.h not checked by configure | script | -------------------------------------------------+------------------------- When trying to compile Varnish 4.0.1 on a Cubietruck I ran into the following issue; make complained about readline.h missing, so obviously the configure script didn't complain about that. As the libedit package provied readline.h that issue was easy enough to fix. When blunty retrying make, without make clean or anything, I ran in the error shown in the file error.txt. The correspondig config.log is attached as config.log.wrong. When doing a fresh rebuild with a clean autogen.sh, ./configure and make, compiling did work without an issue. The config.log belonging to that is attached as config.log.good. 
Cheers, Martin Boer -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 13:24:03 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 13:24:03 -0000 Subject: [Varnish] #1556: varnishlog will not run (LIBVARNISHAPI_1.2 & LIBVARNISHAPI_1.3 not found) Message-ID: <041.e676bf3287d1e96a45e1f9cf3159d57f@varnish-cache.org> #1556: varnishlog will not run (LIBVARNISHAPI_1.2 & LIBVARNISHAPI_1.3 not found) -------------------+-------------------- Reporter: Tin | Type: defect Status: new | Priority: normal Milestone: | Component: build Version: 4.0.1 | Severity: normal Keywords: | -------------------+-------------------- After installing Varnish 4.0.1 varnishlog wouldn't run with the following error: varnishlog: /usr/lib/arm-linux-gnueabihf/libvarnishapi.so.1: version `LIBVARNISHAPI_1.2' not found (required by varnishlog) varnishlog: /usr/lib/arm-linux-gnueabihf/libvarnishapi.so.1: version `LIBVARNISHAPI_1.3' not found (required by varnishlog) This is because an installation of Varnish 3.05 installed these files, and ldconfig only keeps one version, in this case the above ones. After manually removing the above files, and running ldconfig again, varnishlog found the correct files in /usr/local/lib/ Cheers, Martin Boer -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 14:43:53 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 14:43:53 -0000 Subject: [Varnish] #1557: regsuball() doesn't honor lookahead assertion on repeated strings Message-ID: <059.714287d6d2b95c176d62f92faff8648e@varnish-cache.org> #1557: regsuball() doesn't honor lookahead assertion on repeated strings --------------------------------------+---------------------- Reporter: varnish@? 
| Type: defect Status: new | Priority: normal Milestone: | Component: varnishd Version: trunk | Severity: normal Keywords: lookahead pcre regsuball | --------------------------------------+---------------------- Hi Varnish-Team, the function regsuball doesn't honor lookahead assertion in a regular expression correctly, if a regex matches against sequenced or repeated strings. Example: If you strip some query strings and match for different query strings in one regex and the query strings are sequenced one after another, not all strings match: {{{ sub vcl_backend_response { set bereq.url = "/?id=23&name=foo&age=42"; set beresp.http.foo = regsuball(bereq.url, "(?<=[&\?])(id|name|age)=[^&]+(?:&|$)", ""); } }}} The expected result should be: {{{ EXPECT resp.http.foo == "/?" }}} Result is: {{{ resp.http.foo /?name=foo& }}} This error occurs in bin/varnishd/cache/cache_vrt_re.c, in function VRT_regsub. The matched substring is somehow cut off instead of setting the offset for the ongoing pcre_exec runs. So the pcre engine could never test for lookahead assertion, since the former string isn't there anymore. This results in a misbehavior and a not correctly executed match. Best regards, Martin -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 14:47:25 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 14:47:25 -0000 Subject: [Varnish] #1557: regsuball() doesn't honor lookahead assertion on repeated strings In-Reply-To: <059.714287d6d2b95c176d62f92faff8648e@varnish-cache.org> References: <059.714287d6d2b95c176d62f92faff8648e@varnish-cache.org> Message-ID: <074.6306a48355b82fd068cf093aa7470f6e@varnish-cache.org> #1557: regsuball() doesn't honor lookahead assertion on repeated strings --------------------------------------+-------------------- Reporter: varnish@? 
| Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: trunk Severity: normal | Resolution: Keywords: lookahead pcre regsuball | --------------------------------------+-------------------- Comment (by varnish@?): A pull request has been commited to the GitHub repository: https://github.com/varnish/Varnish-Cache/pull/44 Best, Martin -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 14:54:24 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 14:54:24 -0000 Subject: [Varnish] #1557: regsuball() doesn't honor lookahead assertion on repeated strings In-Reply-To: <059.714287d6d2b95c176d62f92faff8648e@varnish-cache.org> References: <059.714287d6d2b95c176d62f92faff8648e@varnish-cache.org> Message-ID: <074.27a9118aef75ea099ea7ff75f741a70f@varnish-cache.org> #1557: regsuball() doesn't honor lookahead assertion on repeated strings --------------------------------------+-------------------- Reporter: varnish@? | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: trunk Severity: normal | Resolution: Keywords: lookahead pcre regsuball | --------------------------------------+-------------------- Comment (by varnish@?): Oh I forgot to mention, that this problem isn't exclusive to varnish- trunk. I've tested this down to varnish-3.0.5. 
Best, Martin -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 24 15:17:44 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 24 Jul 2014 15:17:44 -0000 Subject: [Varnish] #1557: regsuball() doesn't honor lookahead assertion on repeated strings In-Reply-To: <059.714287d6d2b95c176d62f92faff8648e@varnish-cache.org> References: <059.714287d6d2b95c176d62f92faff8648e@varnish-cache.org> Message-ID: <074.48e19e35a7ff323b8db09e98de7c2503@varnish-cache.org> #1557: regsuball() doesn't honor lookahead assertion on repeated strings --------------------------------------+-------------------- Reporter: varnish@? | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: trunk Severity: normal | Resolution: Keywords: lookahead pcre regsuball | --------------------------------------+-------------------- Comment (by varnish@?): Damn, this gives a regression with varnishtest c00001.vtc. I've overlooked this in my test runs. I'll try to look at what happens there. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 25 13:31:35 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 25 Jul 2014 13:31:35 -0000 Subject: [Varnish] #1555: configure not checking for readline.h, giving make error after fixing it. In-Reply-To: <041.90d39daf5df6b4940eecfab0845b4b93@varnish-cache.org> References: <041.90d39daf5df6b4940eecfab0845b4b93@varnish-cache.org> Message-ID: <056.78a9a6760df43e2c34e014ccf3fd913f@varnish-cache.org> #1555: configure not checking for readline.h, giving make error after fixing it. 
-------------------------------------+------------------------------------- Reporter: Tin | Owner: Nils Goroll Type: defect | Priority: normal | Status: closed Component: build | Milestone: Severity: normal | Version: 4.0.1 Keywords: readline.h not checked | Resolution: fixed by configure script | -------------------------------------+------------------------------------- Changes (by Nils Goroll ): * status: new => closed * owner: => Nils Goroll * resolution: => fixed Comment: In [78126742bfa3831959b3cbb3e8ff58e45ff49dd6]: {{{ #!CommitTicketReference repository="" revision="78126742bfa3831959b3cbb3e8ff58e45ff49dd6" really bail out if no usable readline found Fixes #1555 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 25 21:54:23 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 25 Jul 2014 21:54:23 -0000 Subject: [Varnish] #1555: configure not checking for readline.h, giving make error after fixing it. In-Reply-To: <041.90d39daf5df6b4940eecfab0845b4b93@varnish-cache.org> References: <041.90d39daf5df6b4940eecfab0845b4b93@varnish-cache.org> Message-ID: <056.866d219853fa306bd8e3d1e1a4f309e2@varnish-cache.org> #1555: configure not checking for readline.h, giving make error after fixing it. -------------------------------------+------------------------------------- Reporter: Tin | Owner: Nils Goroll Type: defect | Priority: normal | Status: reopened Component: build | Milestone: Severity: normal | Version: 4.0.1 Keywords: readline.h not checked | Resolution: by configure script | -------------------------------------+------------------------------------- Changes (by slink): * status: closed => reopened * resolution: fixed => Comment: The fix broke master when autocrap defined ax_cv_lib_readline and not ac_cv_have_readline, so fgsch reverted it - thanks. Fixing this would be easy, but I think we need to look at libedit vs. readline again. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 25 21:54:54 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 25 Jul 2014 21:54:54 -0000 Subject: [Varnish] #1555: configure not checking for readline.h, giving make error after fixing it. In-Reply-To: <041.90d39daf5df6b4940eecfab0845b4b93@varnish-cache.org> References: <041.90d39daf5df6b4940eecfab0845b4b93@varnish-cache.org> Message-ID: <056.830ca3c199ce5398e6812f1e56f28f2e@varnish-cache.org> #1555: configure not checking for readline.h, giving make error after fixing it. -------------------------------------------------+------------------------- Reporter: Tin | Owner: slink Type: defect | Status: new Priority: normal | Milestone: Component: build | Version: 4.0.1 Severity: normal | Resolution: Keywords: readline.h not checked by configure | script | -------------------------------------------------+------------------------- Changes (by slink): * owner: Nils Goroll => slink * status: reopened => new -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Fri Jul 25 22:04:19 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Fri, 25 Jul 2014 22:04:19 -0000 Subject: [Varnish] #1558: varnish not truncating file cache if make size smaller Message-ID: <045.722a1778b04d21d0c60f1b9808d37ac7@varnish-cache.org> #1558: varnish not truncating file cache if make size smaller -----------------------------+---------------------- Reporter: anuaimi | Type: defect Status: new | Priority: normal Milestone: Varnish 3.0 dev | Component: varnishd Version: 3.0.5 | Severity: normal Keywords: file cache | -----------------------------+---------------------- we are running 3.0.5 on ubuntu. We originally set the file cache to be 33% of the disk. We were getting inconsistent results (some servers had cache files that were 65% of the disk). So we change the /etc/default/varnish file to have a hard-coded size in GB. 
When we restart varnish, the file is not resized. What we have to do is delete the file and then varnish will recreate it in the correct size. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Sat Jul 26 16:03:15 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Sat, 26 Jul 2014 16:03:15 -0000 Subject: [Varnish] #1559: Panic: Assert error in Lck__Lock(), cache/cache_lck.c line 70: Condition(((ilck))->magic == (0x7b86c8a5)) not true. Message-ID: <048.7a9165bdfc48fb046161cf45781b760a@varnish-cache.org> #1559: Panic: Assert error in Lck__Lock(), cache/cache_lck.c line 70: Condition(((ilck))->magic == (0x7b86c8a5)) not true. -----------------------------+---------------------- Reporter: esfourteen | Type: defect Status: new | Priority: normal Milestone: | Component: varnishd Version: 4.0.1 | Severity: normal Keywords: panic, lck_lock | -----------------------------+---------------------- Child worker crashed after an uptime of several days with the following panic: {{{ Assert error in Lck__Lock(), cache/cache_lck.c line 70: Condition(((ilck))->magic == (0x7b86c8a5)) not true. }}} See attachment for full backtrace. 
See this ticket for my full VCL (from another open panic related ticket): https://www.varnish-cache.org/trac/ticket/1553 -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 00:42:25 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 00:42:25 -0000 Subject: [Varnish] #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 In-Reply-To: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> References: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> Message-ID: <063.f4873194bbeaddcc09411bca0a3b4766@varnish-cache.org> #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 ----------------------------+-------------------- Reporter: esfourteen | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: Keywords: panic vry_prep | ----------------------------+-------------------- Comment (by fgsch): Can you try the attached patch ([[attachment:000-r01553.patch]]) and let us know? -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 07:14:00 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 07:14:00 -0000 Subject: [Varnish] #1551: Panic message: Missing errorhandling code in HSH_Purge() In-Reply-To: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> References: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> Message-ID: <058.191f641213191e6e7b70021c8135d386@varnish-cache.org> #1551: Panic message: Missing errorhandling code in HSH_Purge() ----------------------+---------------------- Reporter: aduca | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: unknown Severity: normal | Resolution: Keywords: | ----------------------+---------------------- Comment (by aduca): I modified the sess_workspace increasing it from 128K up to 1MB but unfortunately it still crashes. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 07:19:54 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 07:19:54 -0000 Subject: [Varnish] #1457: vcl.show should expand includes In-Reply-To: <043.f14786107b6d266ced5da4fa3a4b15af@varnish-cache.org> References: <043.f14786107b6d266ced5da4fa3a4b15af@varnish-cache.org> Message-ID: <058.737685c51778ef403dee443d0196d17f@varnish-cache.org> #1457: vcl.show should expand includes ----------------------+---------------------- Reporter: slink | Owner: daghf Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: unknown Severity: normal | Resolution: fixed Keywords: | ----------------------+---------------------- Changes (by phk): * status: new => closed * resolution: => fixed Comment: This was fixed in dd1b9971fd95e9eb4927bc35e186b7c1b02b06ce -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 07:22:25 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 07:22:25 -0000 Subject: [Varnish] #1547: panic when increasing shm_reclen In-Reply-To: <050.6ed11e37793b6c87e37bc1f1cd78d4ba@varnish-cache.org> References: <050.6ed11e37793b6c87e37bc1f1cd78d4ba@varnish-cache.org> Message-ID: <065.082f8ace7e07124aa84a02fb075a4aaa@varnish-cache.org> #1547: panic when increasing shm_reclen ---------------------------+-------------------- Reporter: mattrobenolt | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: Keywords: panic, shmlog | ---------------------------+-------------------- Comment (by phk): You need to also increase the vsl_buffer parameter so it can hold a full log record. 
I will change the code so it reacts more sensibly to parameters out of sync -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 08:00:13 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 08:00:13 -0000 Subject: [Varnish] #1547: panic when increasing shm_reclen In-Reply-To: <050.6ed11e37793b6c87e37bc1f1cd78d4ba@varnish-cache.org> References: <050.6ed11e37793b6c87e37bc1f1cd78d4ba@varnish-cache.org> Message-ID: <065.5588395acf9a983519beabde8c33553f@varnish-cache.org> #1547: panic when increasing shm_reclen ---------------------------+---------------------------------------- Reporter: mattrobenolt | Owner: Poul-Henning Kamp Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: fixed Keywords: panic, shmlog | ---------------------------+---------------------------------------- Changes (by Poul-Henning Kamp ): * status: new => closed * owner: => Poul-Henning Kamp * resolution: => fixed Comment: In [ea1e9ab8c451b33a2e3c2b5d9fbfd4bdc4bbf825]: {{{ #!CommitTicketReference repository="" revision="ea1e9ab8c451b33a2e3c2b5d9fbfd4bdc4bbf825" Rename shm_reclen to vsl_reclen for consistency. Leave shm_reclen as a parameter alias for now. Parameter vsl_buffer must be 12 bytes larger than vsl_reclen in order to avoid a panic when we try to put 12 pounds of VSL into a 5 pound vsl_buffer sack. Tweak the opposite parameter Minimum or Maximum value when we set one of these parameters. 
Fixes #1547 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 08:52:13 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 08:52:13 -0000 Subject: [Varnish] #1560: vcl-miss issue Message-ID: <055.a1af4c0cc202078bc37c5eaad681cb51@varnish-cache.org> #1560: vcl-miss issue ---------------------------------+------------------------- Reporter: suyog.shirgaonkar | Type: defect Status: new | Priority: normal Milestone: Varnish 4.0 release | Component: varnishtest Version: 4.0.1 | Severity: major Keywords: | ---------------------------------+------------------------- Hi, We are using varnish version 4.0.1 and we are facing some issues with vcl_purge. When we purge any object from cache, the next request for that particular object should go to vcl_miss, but this is not working; the request is showing vcl_hit in logs and it is served without any error. Regards, Suyog -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 09:09:03 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 09:09:03 -0000 Subject: [Varnish] #1521: Varnish 4 VCL compilation failed on OSX x86_64 In-Reply-To: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> References: <046.e981867b3cf05ae3a7f8c85fb022c396@varnish-cache.org> Message-ID: <061.5a7e70fd3177c03df2447eeb77b3f477@varnish-cache.org> #1521: Varnish 4 VCL compilation failed on OSX x86_64 ----------------------+---------------------------------------- Reporter: yoloseem | Owner: Poul-Henning Kamp Type: defect | Status: closed Priority: normal | Milestone: Component: build | Version: 4.0.0 Severity: normal | Resolution: fixed Keywords: | ----------------------+---------------------------------------- Changes (by Poul-Henning Kamp ): * owner: => Poul-Henning Kamp * status: needinfo => closed * resolution: => fixed Comment: In [ad6bf9c0e51954cc45fee92d484e95c666d99685]: {{{ #!CommitTicketReference 
repository="" revision="ad6bf9c0e51954cc45fee92d484e95c666d99685" Varnishd needs to run the system's C-compiler to compile the VCL code. For security reasons, we run the C-compiler in a sandbox process which by default uses the same (non-)privileges as the other sandboxes (VCL compiler, test-loader process and the worker process). On some systems access to the C-compiler is limited, also for reasons of security, and varnishd will fail to compile VCL code, unless all the sandboxes are given access to the C-compiler. Add a new parameter "group_cc" which adds a single gid to the grouplist of the sandbox which executes the cc_command, for the benefit of such systems. Do some slightly related polishing of the docs/help-texts in this area while here anyway. Fixes #1521 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 10:01:48 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 10:01:48 -0000 Subject: [Varnish] #1551: Panic message: Missing errorhandling code in HSH_Purge() In-Reply-To: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> References: <043.53e32d86a2b629abfc16269dbd859a95@varnish-cache.org> Message-ID: <058.02bd572fbfe7dad1db72497de03c0385@varnish-cache.org> #1551: Panic message: Missing errorhandling code in HSH_Purge() ----------------------+---------------------------------------- Reporter: aduca | Owner: Poul-Henning Kamp Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: unknown Severity: normal | Resolution: fixed Keywords: | ----------------------+---------------------------------------- Changes (by Poul-Henning Kamp ): * status: new => closed * owner: => Poul-Henning Kamp * resolution: => fixed Comment: In [53687f5b1e0fca2d5ad0a5deb271b2be7703aa49]: {{{ #!CommitTicketReference repository="" revision="53687f5b1e0fca2d5ad0a5deb271b2be7703aa49" If workspace_thread is not big enough to hold all the objcore pointers that need 
to be purged we iterate until done. Fixes #1551 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 10:30:22 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 10:30:22 -0000 Subject: [Varnish] #1560: vcl-miss issue In-Reply-To: <055.a1af4c0cc202078bc37c5eaad681cb51@varnish-cache.org> References: <055.a1af4c0cc202078bc37c5eaad681cb51@varnish-cache.org> Message-ID: <070.c8ab3680f63ece6df3a1fc49d02d249f@varnish-cache.org> #1560: vcl-miss issue -------------------------------+---------------------------------- Reporter: suyog.shirgaonkar | Owner: Type: defect | Status: closed Priority: normal | Milestone: Varnish 4.0 release Component: varnishtest | Version: 4.0.1 Severity: major | Resolution: worksforme Keywords: | -------------------------------+---------------------------------- Changes (by scoof): * status: new => closed * resolution: => worksforme Comment: This is probably not a bug in varnish, but most likely a bug in your VCL. Please use the forum, mailing-lists or IRC to diagnose the problem. The bug tracker should only be used for actionable bugs. If you still believe this is a bug and can include a precise description of how to replicate this behaviour, feel free to open a new bug report. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 10:31:32 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 10:31:32 -0000 Subject: [Varnish] #1558: varnish not truncating file cache if make size smaller In-Reply-To: <045.722a1778b04d21d0c60f1b9808d37ac7@varnish-cache.org> References: <045.722a1778b04d21d0c60f1b9808d37ac7@varnish-cache.org> Message-ID: <060.31ed1b622e317362fa694e85b27ac228@varnish-cache.org> #1558: varnish not truncating file cache if make size smaller ------------------------+------------------------------ Reporter: anuaimi | Owner: Type: defect | Status: closed Priority: normal | Milestone: Varnish 3.0 dev Component: varnishd | Version: 3.0.5 Severity: normal | Resolution: duplicate Keywords: file cache | ------------------------+------------------------------ Changes (by scoof): * status: new => closed * resolution: => duplicate Comment: Duplicate: #1343 -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 16:24:45 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 16:24:45 -0000 Subject: [Varnish] #1547: panic when increasing shm_reclen In-Reply-To: <050.6ed11e37793b6c87e37bc1f1cd78d4ba@varnish-cache.org> References: <050.6ed11e37793b6c87e37bc1f1cd78d4ba@varnish-cache.org> Message-ID: <065.ee2c5f55ab01aec86538ab47ef0ceae4@varnish-cache.org> #1547: panic when increasing shm_reclen ---------------------------+---------------------------------------- Reporter: mattrobenolt | Owner: Poul-Henning Kamp Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: fixed Keywords: panic, shmlog | ---------------------------+---------------------------------------- Comment (by mattrobenolt): Ah, that makes a lot of sense. Thanks for the update to make it less error-prone. 
:) -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 17:08:57 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 17:08:57 -0000 Subject: [Varnish] #1561: Assert error in VDP_gunzip(), cache/cache_gzip.c line 303 Message-ID: <043.94d8bf6c1454372600896f1e48ff1180@varnish-cache.org> #1561: Assert error in VDP_gunzip(), cache/cache_gzip.c line 303 -------------------+---------------------- Reporter: Corey | Type: defect Status: new | Priority: normal Milestone: | Component: varnishd Version: 4.0.1 | Severity: normal Keywords: | -------------------+---------------------- Experienced this a few times now, latest panic attached to ticket. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Mon Jul 28 19:49:16 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Mon, 28 Jul 2014 19:49:16 -0000 Subject: [Varnish] #1562: panic when retrying a short read of req.body - Wrong req_body_status in HTTP1_IterateReqBody() Message-ID: <043.34e20926e8817c4ffe68c76d33ea1b30@varnish-cache.org> #1562: panic when retrying a short read of req.body - Wrong req_body_status in HTTP1_IterateReqBody() ----------------------+--------------------- Reporter: slink | Owner: slink Type: defect | Status: new Priority: high | Milestone: Component: varnishd | Version: unknown Severity: critical | Keywords: ----------------------+--------------------- I will attach a preliminary test case for this, of which the output is given below. The test case needs a change to vtc_http in order to prematurely close a client connection. The attached patch is not a proper solution to this vtc requirement. 
{{{ *** v1 1.5 debug| Child (18542) died signal=6\n *** v1 1.5 debug| Child (18542) Panic message:\n *** v1 1.5 debug| Wrong turn at cache/cache_http1_fsm.c:574:\n *** v1 1.5 debug| Wrong req_body_status in HTTP1_IterateReqBody()\n *** v1 1.5 debug| thread = (cache-worker)\n *** v1 1.5 debug| ident = Linux,3.13-1-amd64,x86_64,-smalloc,-smalloc,-hcritbit,epoll\n *** v1 1.5 debug| Backtrace:\n *** v1 1.5 debug| 0x4323f8: pan_ic+0xd8\n *** v1 1.5 debug| 0x42d3b8: HTTP1_IterateReqBody+0x178\n *** v1 1.5 debug| 0x42b12d: V1F_fetch_hdr+0x10d\n *** v1 1.5 debug| 0x41fe96: vbf_fetch_thread+0x10a6\n *** v1 1.5 debug| 0x434fe9: Pool_Work_Thread+0x389\n *** v1 1.5 debug| 0x4476cd: wrk_thread_real+0xad\n *** v1 1.5 debug| 0x7fdee09d80ca: /lib/x86_64-linux- gnu/libpthread.so.0(+0x80ca) [0x7fdee09d80ca]\n *** v1 1.5 debug| 0x7fdee070d06d: /lib/x86_64-linux- gnu/libc.so.6(clone+0x6d) [0x7fdee070d06d]\n *** v1 1.5 debug| busyobj = 0x7fdeb80008e0 {\n *** v1 1.5 debug| ws = 0x7fdeb80009a0 {\n *** v1 1.5 debug| id = "bo",\n *** v1 1.5 debug| {s,f,r,e} = {0x7fdeb80028c0,+112,(nil),+57376},\n *** v1 1.5 debug| },\n *** v1 1.5 debug| refcnt = 2\n *** v1 1.5 debug| retries = 1\n *** v1 1.5 debug| failed = 0\n *** v1 1.5 debug| state = 1\n *** v1 1.5 debug| is_do_stream\n *** v1 1.5 debug| is_do_pass\n *** v1 1.5 debug| is_uncacheable\n *** v1 1.5 debug| bodystatus = 0 (none),\n *** v1 1.5 debug| },\n *** v1 1.5 debug| http[bereq] = {\n *** v1 1.5 debug| ws = 0x7fdeb80009a0[bo]\n *** v1 1.5 debug| "POST",\n *** v1 1.5 debug| "/",\n *** v1 1.5 debug| "HTTP/1.1",\n *** v1 1.5 debug| "Content-Length: 10000",\n *** v1 1.5 debug| "X-Forwarded-For: 127.0.0.1",\n *** v1 1.5 debug| "X-Varnish: 1003",\n *** v1 1.5 debug| "Host: 127.0.0.1",\n *** v1 1.5 debug| },\n *** v1 1.5 debug| http[beresp] = {\n *** v1 1.5 debug| ws = 0x7fdeb80009a0[bo]\n *** v1 1.5 debug| },\n *** v1 1.5 debug| ws = 0x7fdeb8000b28 { BAD_MAGIC(0x00000000) },\n *** v1 1.5 debug| },\n *** v1 1.5 debug| objcore (FETCH) = 
0x7fdea0000950 {\n *** v1 1.5 debug| refcnt = 2\n *** v1 1.5 debug| flags = 0x102\n *** v1 1.5 debug| objhead = 0xcfd420\n *** v1 1.5 debug| }\n *** v1 1.5 debug| }\n *** v1 1.5 debug| \n *** v1 1.5 debug| \n }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 29 11:22:56 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 29 Jul 2014 11:22:56 -0000 Subject: [Varnish] #1562: panic when retrying a short read of req.body - Wrong req_body_status in HTTP1_IterateReqBody() In-Reply-To: <043.34e20926e8817c4ffe68c76d33ea1b30@varnish-cache.org> References: <043.34e20926e8817c4ffe68c76d33ea1b30@varnish-cache.org> Message-ID: <058.249328f4c3254e49a0cdf3dd069bc4c9@varnish-cache.org> #1562: panic when retrying a short read of req.body - Wrong req_body_status in HTTP1_IterateReqBody() ----------------------+---------------------- Reporter: slink | Owner: slink Type: defect | Status: closed Priority: high | Milestone: Component: varnishd | Version: unknown Severity: critical | Resolution: fixed Keywords: | ----------------------+---------------------- Changes (by Poul-Henning Kamp ): * status: new => closed * resolution: => fixed Comment: In [0d6629b1574e0f01c1cc72bc3a550ec132a92301]: {{{ #!CommitTicketReference repository="" revision="0d6629b1574e0f01c1cc72bc3a550ec132a92301" Correctly fail bad reads in req.body Spotted and fixed by: Nils Goroll Fixes #1562 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 29 14:51:55 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 29 Jul 2014 14:51:55 -0000 Subject: [Varnish] #1446: Varnish and PIC, failing tests, varnish-4.0.0-tp2+2014-03-06 on Fedora 19/x86_64 In-Reply-To: <044.b5f6b8c9362a34a38d6e4dd88a9e1f43@varnish-cache.org> References: <044.b5f6b8c9362a34a38d6e4dd88a9e1f43@varnish-cache.org> Message-ID: <059.4e32049ca1d224c394228f1652b41871@varnish-cache.org> #1446: Varnish and PIC, failing tests, 
varnish-4.0.0-tp2+2014-03-06 on Fedora 19/x86_64 ----------------------------------------+------------------------------ Reporter: ingvar | Owner: Type: defect | Status: closed Priority: normal | Milestone: Varnish 4.0-TP2 Component: build | Version: unknown Severity: normal | Resolution: invalid Keywords: PIC PIE hardened_build f19 | ----------------------------------------+------------------------------ Changes (by fgsch): * status: new => closed * resolution: => invalid Comment: Local problem. Fixed in the fedora specfile. -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 29 18:47:26 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 29 Jul 2014 18:47:26 -0000 Subject: [Varnish] #1561: Assert error in VDP_gunzip(), cache/cache_gzip.c line 303 In-Reply-To: <043.94d8bf6c1454372600896f1e48ff1180@varnish-cache.org> References: <043.94d8bf6c1454372600896f1e48ff1180@varnish-cache.org> Message-ID: <058.8bb999a2124816e03fc9a58ec296d44b@varnish-cache.org> #1561: Assert error in VDP_gunzip(), cache/cache_gzip.c line 303 ----------------------+---------------------------------------- Reporter: Corey | Owner: Poul-Henning Kamp Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: fixed Keywords: | ----------------------+---------------------------------------- Changes (by Poul-Henning Kamp ): * owner: => Poul-Henning Kamp * status: new => closed * resolution: => fixed Comment: In [398d7c6e3b2abf5ccc3b1def91a862636058fa6e]: {{{ #!CommitTicketReference repository="" revision="398d7c6e3b2abf5ccc3b1def91a862636058fa6e" Ensure that we never call a VDP with a zero length unless we are done. 
Fixes #1561 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Tue Jul 29 19:09:48 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Tue, 29 Jul 2014 19:09:48 -0000 Subject: [Varnish] #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 In-Reply-To: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> References: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> Message-ID: <063.f2250e2b76c6fe70a894c96380daf48c@varnish-cache.org> #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 ----------------------------+---------------------------------------- Reporter: esfourteen | Owner: Poul-Henning Kamp Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: fixed Keywords: panic vry_prep | ----------------------------+---------------------------------------- Changes (by Poul-Henning Kamp ): * owner: => Poul-Henning Kamp * status: new => closed * resolution: => fixed Comment: In [a309f194b7c0cb74652da9f01fbde513c807ce33]: {{{ #!CommitTicketReference repository="" revision="a309f194b7c0cb74652da9f01fbde513c807ce33" Make a dedicated cleanup function for req->vary_? to match the dedicated setup function we have. 
Fixes #1553 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 30 21:01:23 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 30 Jul 2014 21:01:23 -0000 Subject: [Varnish] #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 In-Reply-To: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> References: <048.d61504fcef043c3b9a266da23e519164@varnish-cache.org> Message-ID: <063.747fc08c0f86149eff8f1cc659e14bb9@varnish-cache.org> #1553: Assert error in VRY_Prep(), cache/cache_vary.c line 228 ----------------------------+---------------------------------------- Reporter: esfourteen | Owner: Poul-Henning Kamp Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: fixed Keywords: panic vry_prep | ----------------------------+---------------------------------------- Comment (by esfourteen): confirmed fixed using master -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Wed Jul 30 21:03:00 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Wed, 30 Jul 2014 21:03:00 -0000 Subject: [Varnish] #1559: Panic: Assert error in Lck__Lock(), cache/cache_lck.c line 70: Condition(((ilck))->magic == (0x7b86c8a5)) not true. In-Reply-To: <048.7a9165bdfc48fb046161cf45781b760a@varnish-cache.org> References: <048.7a9165bdfc48fb046161cf45781b760a@varnish-cache.org> Message-ID: <063.e223414e80695ae55cbf277b0ecd34bc@varnish-cache.org> #1559: Panic: Assert error in Lck__Lock(), cache/cache_lck.c line 70: Condition(((ilck))->magic == (0x7b86c8a5)) not true. 
-----------------------------+-------------------- Reporter: esfourteen | Owner: Type: defect | Status: new Priority: normal | Milestone: Component: varnishd | Version: 4.0.1 Severity: normal | Resolution: Keywords: panic, lck_lock | -----------------------------+-------------------- Comment (by esfourteen): switched to latest master and this has yet to occur -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 31 08:58:26 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 31 Jul 2014 08:58:26 -0000 Subject: [Varnish] #1563: Consider raising the client timeout in varnish test suite Message-ID: <044.079169cfe78321142561418cac22e845@varnish-cache.org> #1563: Consider raising the client timeout in varnish test suite -------------------------+------------------- Reporter: ingvar | Owner: Type: enhancement | Status: new Priority: normal | Milestone: Component: build | Version: 4.0.1 Severity: normal | Keywords: test -------------------------+------------------- As discussed with phk on irc: On slow/crowded builders, the client timeout (15s?) in the test suite might be a little low. Example here (armv7hl): https://bugzilla.redhat.com/show_bug.cgi?id=1107052 Please consider raising the client timeout in the test suite. 
Ingvar -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 31 09:49:43 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 31 Jul 2014 09:49:43 -0000 Subject: [Varnish] #1563: Consider raising the client timeout in varnish test suite In-Reply-To: <044.079169cfe78321142561418cac22e845@varnish-cache.org> References: <044.079169cfe78321142561418cac22e845@varnish-cache.org> Message-ID: <059.5aab346922bfb69e2c5627ed90c28281@varnish-cache.org> #1563: Consider raising the client timeout in varnish test suite -------------------------+---------------------------------------- Reporter: ingvar | Owner: Poul-Henning Kamp Type: enhancement | Status: closed Priority: normal | Milestone: Component: build | Version: 4.0.1 Severity: normal | Resolution: fixed Keywords: test | -------------------------+---------------------------------------- Changes (by Poul-Henning Kamp ): * status: new => closed * owner: => Poul-Henning Kamp * resolution: => fixed Comment: In [9bce25d60fa66c0697309a5e7135d32cc7e469c0]: {{{ #!CommitTicketReference repository="" revision="9bce25d60fa66c0697309a5e7135d32cc7e469c0" Default the http timeout to half of the total testduration. 
Fixes #1563 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 31 11:55:12 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 31 Jul 2014 11:55:12 -0000 Subject: [Varnish] #1512: Changes to bereq are lost between v_b_r and v_b_f In-Reply-To: <043.04a15d59ba73f8274523e0e9150e84a7@varnish-cache.org> References: <043.04a15d59ba73f8274523e0e9150e84a7@varnish-cache.org> Message-ID: <058.3ce9069479fac0f479f87e183ae7e159@varnish-cache.org> #1512: Changes to bereq are lost between v_b_r and v_b_f ----------------------+--------------------- Reporter: fgsch | Owner: phk Type: defect | Status: closed Priority: normal | Milestone: Component: varnishd | Version: trunk Severity: normal | Resolution: fixed Keywords: | ----------------------+--------------------- Changes (by Federico G. Schwindt ): * status: new => closed * resolution: => fixed Comment: In [3446fb1c563357054c6aa93a08c946ce080c26f1]: {{{ #!CommitTicketReference repository="" revision="3446fb1c563357054c6aa93a08c946ce080c26f1" Ensure bereq changes are not lost across vcl_backend_xxx methods Add std.rollback() to rollback req or bereq and mark the builtin rollback as deprecated. Original diff by Nils Goroll. Tweaks and reworked after phk comments by me. Fixes #1512 }}} -- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 31 15:22:33 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 31 Jul 2014 15:22:33 -0000 Subject: [Varnish] #1564: segfault in jemalloc_linux.c Message-ID: <046.ab2a63b4baea1c77e9c154ec0808935c@varnish-cache.org> #1564: segfault in jemalloc_linux.c ----------------------+---------------------- Reporter: dcarlier | Type: defect Status: new | Priority: low Milestone: | Component: varnishd Version: unknown | Severity: major Keywords: | ----------------------+---------------------- warning: Could not load shared library symbols for ./vcl.A9lWNYFW.so. 
Do you need "set solib-search-path" or "set sysroot"? [Thread debugging using libthread_db enabled] Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1". Core was generated by `/opt/local/sbin/varnishd -P /var/run/varnishd.pid -a :80 -T localhost:6082 -f /'. Program terminated with signal SIGSEGV, Segmentation fault. #0 0x000000000046de5b in arena_dalloc_small (arena=0x7f9f74000020, chunk=0x7f9f74000000, ptr=0x7f9f740008c0, mapelm=0x7f9f74000020) at jemalloc_linux.c:3687 3687 jemalloc_linux.c: No such file or directory. (gdb) bt #0 0x000000000046de5b in arena_dalloc_small (arena=0x7f9f74000020, chunk=0x7f9f74000000, ptr=0x7f9f740008c0, mapelm=0x7f9f74000020) at jemalloc_linux.c:3687 #1 0x000000000046e1d5 in arena_dalloc (arena=0x7f9f74000020, chunk=0x7f9f74000000, ptr=0x7f9f740008c0) at jemalloc_linux.c:3939 #2 0x000000000046e244 in idalloc (ptr=0x7f9f740008c0) at jemalloc_linux.c:3957 #3 0x000000000047050a in free (ptr=0x7f9f740008c0) at jemalloc_linux.c:5586 #4 0x00007f9fe9b58fb9 in __GI__dl_deallocate_tls (tcb=tcb at entry=0x7f9f9d9fe700, dealloc_tcb=dealloc_tcb at entry=false) at dl- tls.c:482 #5 0x00007f9fe898b027 in __free_stacks (limit=limit at entry=41943040) at allocatestack.c:282 #6 0x00007f9fe898b13f in queue_stack (stack=0x47050a ) at allocatestack.c:310 #7 __deallocate_stack (pd=pd at entry=0x7f9f9a1f7700) at allocatestack.c:771 #8 0x00007f9fe898c275 in __free_tcb (pd=0x7f9f9a1f7700) at pthread_create.c:226 #9 start_thread (arg=0x7f9f9a1f7700) at pthread_create.c:432 #10 0x00007f9fe86b930d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:111 (gdb) quit To make it workable I added this : 3686 - assert(run->magic == ARENA_RUN_MAGIC); + if (run == NULL || run->bin == NULL) + return; + assert(run->magic == ARENA_RUN_MAGIC); Not sure it addresses really the issue but it went really messy without. 
-- Ticket URL: Varnish The Varnish HTTP Accelerator From varnish-bugs at varnish-cache.org Thu Jul 31 20:11:41 2014 From: varnish-bugs at varnish-cache.org (Varnish) Date: Thu, 31 Jul 2014 20:11:41 -0000 Subject: [Varnish] #1564: segfault in jemalloc_linux.c In-Reply-To: <046.ab2a63b4baea1c77e9c154ec0808935c@varnish-cache.org> References: <046.ab2a63b4baea1c77e9c154ec0808935c@varnish-cache.org> Message-ID: <061.f15c4214dca3ab430f459418576de9c0@varnish-cache.org> #1564: segfault in jemalloc_linux.c ----------------------+---------------------- Reporter: dcarlier | Owner: Type: defect | Status: closed Priority: low | Milestone: Component: varnishd | Version: unknown Severity: major | Resolution: invalid Keywords: | ----------------------+---------------------- Changes (by phk): * status: new => closed * resolution: => invalid Old description: > warning: Could not load shared library symbols for ./vcl.A9lWNYFW.so. > Do you need "set solib-search-path" or "set sysroot"? > [Thread debugging using libthread_db enabled] > Using host libthread_db library "/lib/x86_64-linux- > gnu/libthread_db.so.1". > Core was generated by `/opt/local/sbin/varnishd -P /var/run/varnishd.pid > -a :80 -T localhost:6082 -f /'. > Program terminated with signal SIGSEGV, Segmentation fault. > #0 0x000000000046de5b in arena_dalloc_small (arena=0x7f9f74000020, > chunk=0x7f9f74000000, ptr=0x7f9f740008c0, mapelm=0x7f9f74000020) at > jemalloc_linux.c:3687 > 3687 jemalloc_linux.c: No such file or directory. 
> (gdb) bt > #0 0x000000000046de5b in arena_dalloc_small (arena=0x7f9f74000020, > chunk=0x7f9f74000000, ptr=0x7f9f740008c0, mapelm=0x7f9f74000020) at > jemalloc_linux.c:3687 > #1 0x000000000046e1d5 in arena_dalloc (arena=0x7f9f74000020, > chunk=0x7f9f74000000, ptr=0x7f9f740008c0) at jemalloc_linux.c:3939 > #2 0x000000000046e244 in idalloc (ptr=0x7f9f740008c0) at > jemalloc_linux.c:3957 > #3 0x000000000047050a in free (ptr=0x7f9f740008c0) at > jemalloc_linux.c:5586 > #4 0x00007f9fe9b58fb9 in __GI__dl_deallocate_tls > (tcb=tcb at entry=0x7f9f9d9fe700, dealloc_tcb=dealloc_tcb at entry=false) at > dl-tls.c:482 > #5 0x00007f9fe898b027 in __free_stacks (limit=limit at entry=41943040) at > allocatestack.c:282 > #6 0x00007f9fe898b13f in queue_stack (stack=0x47050a ) at > allocatestack.c:310 > #7 __deallocate_stack (pd=pd at entry=0x7f9f9a1f7700) at > allocatestack.c:771 > #8 0x00007f9fe898c275 in __free_tcb (pd=0x7f9f9a1f7700) at > pthread_create.c:226 > #9 start_thread (arg=0x7f9f9a1f7700) at pthread_create.c:432 > #10 0x00007f9fe86b930d in clone () at > ../sysdeps/unix/sysv/linux/x86_64/clone.S:111 > (gdb) quit > > To make it workable I added this : > 3686 > - assert(run->magic == ARENA_RUN_MAGIC); > + if (run == NULL || run->bin == NULL) > + return; > + assert(run->magic == ARENA_RUN_MAGIC); > > Not sure it addresses really the issue but it went really messy without. New description: {{{ warning: Could not load shared library symbols for ./vcl.A9lWNYFW.so. Do you need "set solib-search-path" or "set sysroot"? [Thread debugging using libthread_db enabled] Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1". Core was generated by `/opt/local/sbin/varnishd -P /var/run/varnishd.pid -a :80 -T localhost:6082 -f /'. Program terminated with signal SIGSEGV, Segmentation fault. 
#0 0x000000000046de5b in arena_dalloc_small (arena=0x7f9f74000020, chunk=0x7f9f74000000, ptr=0x7f9f740008c0, mapelm=0x7f9f74000020) at jemalloc_linux.c:3687 3687 jemalloc_linux.c: No such file or directory. (gdb) bt #0 0x000000000046de5b in arena_dalloc_small (arena=0x7f9f74000020, chunk=0x7f9f74000000, ptr=0x7f9f740008c0, mapelm=0x7f9f74000020) at jemalloc_linux.c:3687 #1 0x000000000046e1d5 in arena_dalloc (arena=0x7f9f74000020, chunk=0x7f9f74000000, ptr=0x7f9f740008c0) at jemalloc_linux.c:3939 #2 0x000000000046e244 in idalloc (ptr=0x7f9f740008c0) at jemalloc_linux.c:3957 #3 0x000000000047050a in free (ptr=0x7f9f740008c0) at jemalloc_linux.c:5586 #4 0x00007f9fe9b58fb9 in __GI__dl_deallocate_tls (tcb=tcb at entry=0x7f9f9d9fe700, dealloc_tcb=dealloc_tcb at entry=false) at dl- tls.c:482 #5 0x00007f9fe898b027 in __free_stacks (limit=limit at entry=41943040) at allocatestack.c:282 #6 0x00007f9fe898b13f in queue_stack (stack=0x47050a ) at allocatestack.c:310 #7 __deallocate_stack (pd=pd at entry=0x7f9f9a1f7700) at allocatestack.c:771 #8 0x00007f9fe898c275 in __free_tcb (pd=0x7f9f9a1f7700) at pthread_create.c:226 #9 start_thread (arg=0x7f9f9a1f7700) at pthread_create.c:432 #10 0x00007f9fe86b930d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:111 (gdb) quit }}} To make it workable I added this : {{{ 3686 - assert(run->magic == ARENA_RUN_MAGIC); + if (run == NULL || run->bin == NULL) + return; + assert(run->magic == ARENA_RUN_MAGIC); }}} Not sure it addresses really the issue but it went really messy without. -- Comment: There is nothing in that backtrace that indicates that varnish has anything to do with that failure. I'm closing this ticket because there is nothing we can do about it. -- Ticket URL: Varnish The Varnish HTTP Accelerator