varnish-cache/vmod/vmod_directors_shard_cfg.c
0
/*-
1
 * Copyright 2009-2016 UPLEX - Nils Goroll Systemoptimierung
2
 * All rights reserved.
3
 *
4
 * Authors: Nils Goroll <nils.goroll@uplex.de>
5
 *          Geoffrey Simmons <geoff@uplex.de>
6
 *
7
 * SPDX-License-Identifier: BSD-2-Clause
8
 *
9
 * Redistribution and use in source and binary forms, with or without
10
 * modification, are permitted provided that the following conditions
11
 * are met:
12
 * 1. Redistributions of source code must retain the above copyright
13
 *    notice, this list of conditions and the following disclaimer.
14
 * 2. Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in the
16
 *    documentation and/or other materials provided with the distribution.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 */
30
31
#include "config.h"

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cache/cache.h"

#include "vmod_directors_shard_dir.h"
#include "vmod_directors_shard_cfg.h"
42
43
/*lint -esym(749,  shard_change_task_e::*) */
/* Kind of a single queued reconfiguration task. Sentinels guard against
 * zero-initialized / out-of-range values. */
enum shard_change_task_e {
	_SHARD_TASK_E_INVALID = 0,
	CLEAR,		/* drop all backends */
	ADD_BE,		/* add one backend */
	REMOVE_BE,	/* remove matching backend(s) */
	_SHARD_TASK_E_MAX
};
51
52
/* One queued task on a change list; lives on the task workspace. */
struct shard_change_task {
	unsigned				magic;
#define SHARD_CHANGE_TASK_MAGIC			0x1e1168af
	enum shard_change_task_e		task;
	void					*priv;	/* struct shard_backend * for ADD_BE/REMOVE_BE */
	VCL_REAL				weight;	/* replica weight, only used for ADD_BE */
	VSTAILQ_ENTRY(shard_change_task)	list;
};
60
61
/* Per-task change list for one shard director, stored in a PRIV_TASK. */
struct shard_change {
	unsigned				magic;
#define SHARD_CHANGE_MAGIC			0xdff5c9a6
	struct vsl_log				*vsl;	/* log of the owning ctx */
	struct sharddir				*shardd;	/* director being changed */
	VSTAILQ_HEAD(,shard_change_task)	tasks;
};
68
69
/* Working state while applying a change list to the backend array. */
struct backend_reconfig {
	struct sharddir * const shardd;
	unsigned		hint;	// on number of backends after reconfig
	unsigned		hole_n; // number of holes in backends array
	unsigned		hole_i; // index hint on first hole
};
75
76
/* forward decl */
77
static VCL_BOOL
78
change_reconfigure(VRT_CTX, struct shard_change *change, VCL_INT replicas);
79
80
/*
81
 * ============================================================
82
 * change / task list
83
 *
84
 * for backend reconfiguration, we create a change list on the VCL workspace in
85
 * a PRIV_TASK state, which we work in reconfigure.
86
 */
87
88
/* PRIV_TASK fini callback: apply any still-pending change tasks when the
 * task scope ends, so queued changes are not silently lost.
 */
static void v_matchproto_(vmod_priv_fini_f)
shard_change_fini(VRT_CTX, void * priv)
{
	struct shard_change *change;

	if (priv == NULL)
		return;

	CAST_OBJ_NOTNULL(change, priv, SHARD_CHANGE_MAGIC);

	/* NOTE(review): 67 is presumably the vmod's default replicas
	 * argument — confirm against the .vcc definition. */
	(void) change_reconfigure(ctx, change, 67);
}
100
101
/* priv methods for the PRIV_TASK change list; fini applies pending tasks */
static const struct vmod_priv_methods shard_change_priv_methods[1] = {{
	.magic = VMOD_PRIV_METHODS_MAGIC,
	.type = "vmod_directors_shard_cfg",
	.fini = shard_change_fini
}};
106
107
/* Get or create the PRIV_TASK-scoped change list for this director.
 *
 * The priv is keyed on the director's address plus task_off_cfg, so one
 * VCL task can carry independent change lists for several directors.
 * Returns NULL (after shard_fail) if no priv_task is available or the
 * task workspace allocation fails.
 */
static struct shard_change *
shard_change_get(VRT_CTX, struct sharddir * const shardd)
{
	struct vmod_priv *task;
	struct shard_change *change;
	const void *id = (const char *)shardd + task_off_cfg;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	task = VRT_priv_task(ctx, id);
	if (task == NULL) {
		shard_fail(ctx, shardd->name, "%s", "no priv_task");
		return (NULL);
	}

	/* reuse an existing change list for the same task & director */
	if (task->priv != NULL) {
		CAST_OBJ_NOTNULL(change, task->priv, SHARD_CHANGE_MAGIC);
		assert (change->vsl == ctx->vsl);
		assert (change->shardd == shardd);
		return (change);
	}

	WS_TASK_ALLOC_OBJ(ctx, change, SHARD_CHANGE_MAGIC);
	if (change == NULL)
		return (NULL);
	change->vsl = ctx->vsl;
	change->shardd = shardd;
	VSTAILQ_INIT(&change->tasks);
	task->priv = change;
	task->methods = shard_change_priv_methods;

	return (change);
}
140
141
/* Empty the task list after the changes have been applied. The tasks
 * themselves live on the task workspace and need no individual free. */
static void
shard_change_finish(struct shard_change *change)
{
	CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);

	VSTAILQ_INIT(&change->tasks);
}
148
149
static struct shard_change_task *
150 6280
shard_change_task_add(VRT_CTX, struct shard_change *change,
151
    enum shard_change_task_e task_e, void *priv)
152
{
153
        struct shard_change_task *task;
154
155 6280
        CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);
156
157 12560
        WS_TASK_ALLOC_OBJ(ctx, task, SHARD_CHANGE_TASK_MAGIC);
158 6280
        if (task == NULL)
159 0
                return (NULL);
160 6280
        task->task = task_e;
161 6280
        task->priv = priv;
162 6280
        VSTAILQ_INSERT_TAIL(&change->tasks, task, list);
163
164 6280
        return (task);
165 6280
}
166
167
/* Queue an ADD_BE or REMOVE_BE task for a backend.
 *
 * A shard_backend spec is allocated on the workspace and handed to the
 * task as priv; the director reference is taken here (VRT_Assign_Backend)
 * and released when the spec is consumed or deleted. An empty ident is
 * normalized to NULL. Returns NULL on workspace exhaustion.
 */
static inline struct shard_change_task *
shard_change_task_backend(VRT_CTX, struct sharddir *shardd,
    enum shard_change_task_e task_e, VCL_BACKEND be, VCL_STRING ident,
    VCL_DURATION rampup)
{
	struct shard_change *change;
	struct shard_backend *b;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(task_e == ADD_BE || task_e == REMOVE_BE);

	change = shard_change_get(ctx, shardd);
	if (change == NULL)
		return (NULL);

	b = WS_Alloc(ctx->ws, sizeof(*b));
	if (b == NULL) {
		shard_fail(ctx, change->shardd->name, "%s",
		    "could not get workspace for task");
		return (NULL);
	}

	b->backend = NULL;
	VRT_Assign_Backend(&b->backend, be);
	b->ident = ident != NULL && *ident != '\0' ? ident : NULL;
	b->rampup = rampup;

	return (shard_change_task_add(ctx, change, task_e, b));
}
196
197
/*
198
 * ============================================================
199
 * director reconfiguration tasks
200
 */
201
VCL_BOOL
202 4520
shardcfg_add_backend(VRT_CTX, struct sharddir *shardd,
203
    VCL_BACKEND be, VCL_STRING ident, VCL_DURATION rampup, VCL_REAL weight)
204
{
205
        struct shard_change_task *task;
206
207 4520
        assert (weight >= 1);
208 4520
        AN(be);
209
210 9040
        task = shard_change_task_backend(ctx, shardd, ADD_BE,
211 4520
            be, ident, rampup);
212
213 4520
        if (task == NULL)
214 0
                return (0);
215
216 4520
        task->weight = weight;
217 4520
        return (1);
218 4520
}
219
220
VCL_BOOL
221 1160
shardcfg_remove_backend(VRT_CTX, struct sharddir *shardd,
222
    VCL_BACKEND be, VCL_STRING ident)
223
{
224 3480
        return (shard_change_task_backend(ctx, shardd, REMOVE_BE,
225 2320
            be, ident, 0) != NULL);
226
}
227
228
VCL_BOOL
229 600
shardcfg_clear(VRT_CTX, struct sharddir *shardd)
230
{
231
        struct shard_change *change;
232
233 600
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
234
235 600
        change = shard_change_get(ctx, shardd);
236 600
        if (change == NULL)
237 0
                return (0);
238
239 600
        return (shard_change_task_add(ctx, change, CLEAR, NULL) != NULL);
240 600
}
241
242
/*
243
 * ============================================================
244
 * consistent hashing circle init
245
 */
246
247
/* qsort(3) comparison function type */
typedef int (*compar)( const void*, const void* );
248
249
static int
250 416920
circlepoint_compare(const struct shard_circlepoint *a,
251
    const struct shard_circlepoint *b)
252
{
253 416920
        return ((a->point == b->point) ? 0 : ((a->point > b->point) ? 1 : -1));
254
}
255
256
/* (Re)build the consistent-hashing circle from the current backends.
 *
 * For each backend, min(replicas, rmax) points are placed on the circle
 * at the 32-bit hash of "<ident><j>", then the whole circle is sorted by
 * point. rmax caps per-backend replicas so the total stays below
 * UINT32_MAX. Caller must hold the write lock; the old circle must
 * already have been freed (AZ below).
 */
static void
shardcfg_hashcircle(struct sharddir *shardd)
{
	const struct shard_backend *backends, *b;
	unsigned h;
	uint32_t i, j, n_points, r, rmax;
	const char *ident;
	char s[12]; // log10(UINT32_MAX) + 2;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	AZ(shardd->hashcircle);

	assert(shardd->n_backend > 0);
	backends=shardd->backend;
	AN(backends);

	/* first pass: count the points, capping replicas at rmax */
	n_points = 0;
	rmax = (UINT32_MAX - 1) / shardd->n_backend;
	for (b = backends; b < backends + shardd->n_backend; b++) {
		CHECK_OBJ_NOTNULL(b->backend, DIRECTOR_MAGIC);
		n_points += vmin_t(uint32_t, b->replicas, rmax);
	}

	assert(n_points < UINT32_MAX);

	shardd->n_points = n_points;
	shardd->hashcircle = calloc(n_points, sizeof(struct shard_circlepoint));
	AN(shardd->hashcircle);

	/* second pass: hash each (ident, replica index) onto the circle */
	i = 0;
	for (h = 0, b = backends; h < shardd->n_backend; h++, b++) {
		ident = b->ident ? b->ident : VRT_BACKEND_string(b->backend);

		AN(ident);
		assert(ident[0] != '\0');

		r = vmin_t(uint32_t, b->replicas, rmax);

		for (j = 0; j < r; j++) {
			bprintf(s, "%d", j);
			assert (i < n_points);
			shardd->hashcircle[i].point =
			    VRT_HashStrands32(TOSTRANDS(2, ident, s));
			shardd->hashcircle[i].host = h;
			i++;
		}
	}
	assert (i == n_points);
	qsort( (void *) shardd->hashcircle, n_points,
	    sizeof (struct shard_circlepoint), (compar) circlepoint_compare);

	/* optional debug dump of the sorted circle */
	if ((shardd->debug_flags & SHDBG_CIRCLE) == 0)
		return;

	for (i = 0; i < n_points; i++)
		SHDBG(SHDBG_CIRCLE, shardd,
		    "hashcircle[%5jd] = {point = %8x, host = %2u}\n",
		    (intmax_t)i, shardd->hashcircle[i].point,
		    shardd->hashcircle[i].host);
}
316
317
/*
318
 * ============================================================
319
 * configure the director backends
320
 */
321
322
static void
323 1840
shardcfg_backend_free(struct shard_backend *f)
324
{
325 1840
        if (f->freeptr)
326 1400
                free (f->freeptr);
327 1840
        VRT_Assign_Backend(&f->backend, NULL);
328 1840
        memset(f, 0, sizeof(*f));
329 1840
}
330
331
static void
332 3880
shardcfg_backend_copyin(struct shard_backend *dst,
333
    const struct shard_backend *src)
334
{
335 3880
        dst->backend = src->backend;
336 3880
        dst->ident = src->ident ? strdup(src->ident) : NULL;
337 3880
        dst->rampup = src->rampup;
338 3880
}
339
340
static int
341 28560
shardcfg_backend_cmp(const struct shard_backend *a,
342
    const struct shard_backend *b)
343
{
344
        const char *ai, *bi;
345
346 28560
        ai = a->ident;
347 28560
        bi = b->ident;
348
349 28560
        assert(ai || a->backend);
350 28560
        assert(bi || b->backend);
351
352
        /* vcl_names are unique, so we can compare the backend pointers */
353 28560
        if (ai == NULL && bi == NULL)
354 1880
                return (a->backend != b->backend);
355
356 26680
        if (ai == NULL)
357 240
                ai = VRT_BACKEND_string(a->backend);
358
359 26680
        if (bi == NULL)
360 600
                bi = VRT_BACKEND_string(b->backend);
361
362 26680
        AN(ai);
363 26680
        AN(bi);
364 26680
        return (strcmp(ai, bi));
365 28560
}
366
367
/* for removal, we delete all instances if the backend matches */
368
static int
369 13000
shardcfg_backend_del_cmp(const struct shard_backend *task,
370
    const struct shard_backend *b)
371
{
372 13000
        assert(task->backend || task->ident);
373
374 13000
        if (task->ident == NULL)
375 360
                return (task->backend != b->backend);
376
377 12640
        return (shardcfg_backend_cmp(task, b));
378 13000
}
379
380
static const struct shard_backend *
381 4200
shardcfg_backend_lookup(const struct backend_reconfig *re,
382
    const struct shard_backend *b)
383
{
384 4200
        unsigned i, max = re->shardd->n_backend + re->hole_n;
385 4200
        const struct shard_backend *bb = re->shardd->backend;
386
387 4200
        if (max > 0)
388 3200
                AN(bb);
389
390 20040
        for (i = 0; i < max; i++) {
391 16160
                if (bb[i].backend == NULL)
392 240
                        continue;       // hole
393 15920
                if (!shardcfg_backend_cmp(b, &bb[i]))
394 320
                        return (&bb[i]);
395 15600
        }
396 3880
        return (NULL);
397 4200
}
398
399
/* Grow the backend array to hold at least re->hint slots.
 *
 * Policy: never less than 16 slots; otherwise double the current
 * capacity. Overwriting the pointer with realloc()'s result directly is
 * safe here only because AN() aborts on allocation failure — there is no
 * recovery path that would need the old pointer.
 */
static void
shardcfg_backend_expand(const struct backend_reconfig *re)
{
	unsigned min = re->hint;

	CHECK_OBJ_NOTNULL(re->shardd, SHARDDIR_MAGIC);

	min = vmax_t(unsigned, min, 16);

	if (re->shardd->l_backend < min)
		re->shardd->l_backend = min;
	else
		re->shardd->l_backend *= 2;

	re->shardd->backend = realloc(re->shardd->backend,
	    re->shardd->l_backend * sizeof *re->shardd->backend);

	AN(re->shardd->backend);
}
418
419
/* Insert backend spec b with the given replica count.
 *
 * Prefers filling an existing hole (tracked via re->hole_i / hole_n);
 * otherwise appends at the end, expanding the array when full. The spec's
 * ident is duplicated by shardcfg_backend_copyin.
 */
static void
shardcfg_backend_add(struct backend_reconfig *re,
    const struct shard_backend *b, uint32_t replicas)
{
	unsigned i;
	struct shard_backend *bb = re->shardd->backend;

	if (re->hole_n == 0) {
		/* no holes: append, growing the array if needed */
		if (re->shardd->n_backend >= re->shardd->l_backend) {
			shardcfg_backend_expand(re);
			bb = re->shardd->backend;
		}
		assert(re->shardd->n_backend < re->shardd->l_backend);
		i = re->shardd->n_backend;
	} else {
		/* advance hole_i to the next actual hole and claim it */
		assert(re->hole_i != UINT_MAX);
		do {
			if (!bb[re->hole_i].backend)
				break;
		} while (++(re->hole_i) < re->shardd->n_backend + re->hole_n);
		assert(re->hole_i < re->shardd->n_backend + re->hole_n);

		i = (re->hole_i)++;
		(re->hole_n)--;
	}

	re->shardd->n_backend++;
	shardcfg_backend_copyin(&bb[i], b);
	bb[i].replicas = replicas;
}
449
450
/* Free all backend slots and reset the count. The array itself is kept
 * for reuse (freed only in shardcfg_delete()). */
void
shardcfg_backend_clear(struct sharddir *shardd)
{
	unsigned i;
	for (i = 0; i < shardd->n_backend; i++)
		shardcfg_backend_free(&shardd->backend[i]);
	shardd->n_backend = 0;
}
458
459
460
/* Remove every slot matching spec (all instances when matching by
 * backend pointer only — see shardcfg_backend_del_cmp).
 *
 * Freed slots become holes tracked in re; holes at the very end of the
 * array are not counted (nothing needs compacting there). Finally the
 * spec's own director reference is dropped.
 */
static void
shardcfg_backend_del(struct backend_reconfig *re, struct shard_backend *spec)
{
	unsigned i, max = re->shardd->n_backend + re->hole_n;
	struct shard_backend * const bb = re->shardd->backend;

	for (i = 0; i < max; i++) {
		if (bb[i].backend == NULL)
			continue;	// hole
		if (shardcfg_backend_del_cmp(spec, &bb[i]))
			continue;

		shardcfg_backend_free(&bb[i]);
		re->shardd->n_backend--;
		/* only count as hole if not past the new end */
		if (i < re->shardd->n_backend + re->hole_n) {
			(re->hole_n)++;
			re->hole_i = vmin(re->hole_i, i);
		}
	}
	VRT_Assign_Backend(&spec->backend, NULL);
}
481
482
/* Compact the backend array: eliminate all holes so the first n_backend
 * slots are contiguous.
 *
 * Loop invariant: holes at the tail are simply trimmed; any remaining
 * hole is filled by moving the last live slot into it. Terminates with
 * hole_n == 0.
 */
static void
shardcfg_backend_finalize(struct backend_reconfig *re)
{
	unsigned i;
	struct shard_backend * const bb = re->shardd->backend;

	while (re->hole_n > 0) {
		// trim end
		i = re->shardd->n_backend + re->hole_n - 1;
		while (re->hole_n && bb[i].backend == NULL) {
			(re->hole_n)--;
			i--;
		}

		if (re->hole_n == 0)
			break;

		assert(re->hole_i < i);

		/* advance hole_i to the next actual hole */
		do {
			if (!bb[re->hole_i].backend)
				break;
		} while (++(re->hole_i) <= i);

		assert(re->hole_i < i);
		assert(bb[re->hole_i].backend == NULL);
		assert(bb[i].backend != NULL);

		/* move the last live slot into the hole */
		memcpy(&bb[re->hole_i], &bb[i], sizeof(*bb));
		memset(&bb[i], 0, sizeof(*bb));

		(re->hole_n)--;
		(re->hole_i)++;
	}

	assert(re->hole_n == 0);
}
519
520
/*
521
 * ============================================================
522
 * work the change tasks
523
 */
524
525
/* Apply a change list to the director's backend array.
 *
 * Two passes: the first sizes the expected backend count (re.hint) and
 * remembers the last CLEAR task; the second applies everything after
 * that CLEAR (all earlier tasks are made moot by it). Duplicate adds are
 * skipped with a notice; removals leave holes which are compacted by
 * shardcfg_backend_finalize(). Caller must hold the write lock.
 */
static void
shardcfg_apply_change(struct vsl_log *vsl, struct sharddir *shardd,
    const struct shard_change *change, VCL_INT replicas)
{
	struct shard_change_task *task, *clear;
	const struct shard_backend *b;
	uint32_t b_replicas;

	struct backend_reconfig re = {
		.shardd = shardd,
		.hint = shardd->n_backend,
		.hole_n = 0,
		.hole_i = UINT_MAX
	};

	// XXX assert sharddir_locked(shardd)

	/* pass 1: find the last CLEAR and estimate the final size */
	clear = NULL;
	VSTAILQ_FOREACH(task, &change->tasks, list) {
		CHECK_OBJ_NOTNULL(task, SHARD_CHANGE_TASK_MAGIC);
		switch (task->task) {
		case CLEAR:
			clear = task;
			re.hint = 0;
			break;
		case ADD_BE:
			re.hint++;
			break;
		case REMOVE_BE:
			break;
		default:
			INCOMPL();
		}
	}

	if (clear) {
		shardcfg_backend_clear(shardd);
		clear = VSTAILQ_NEXT(clear, list);
		if (clear == NULL)
			return;
	}

	/* pass 2: apply tasks; with clear == NULL, FOREACH_FROM starts
	 * from the head of the list */
	task = clear;
	VSTAILQ_FOREACH_FROM(task, &change->tasks, list) {
		CHECK_OBJ_NOTNULL(task, SHARD_CHANGE_TASK_MAGIC);
		switch (task->task) {
		case CLEAR:
			/* unreachable: we start after the last CLEAR */
			assert(task->task != CLEAR);
			break;
		case ADD_BE:
			b = shardcfg_backend_lookup(&re, task->priv);

			if (b == NULL) {
				assert (task->weight >= 1);
				/* clamp weighted replicas to uint32_t */
				if (replicas * task->weight > UINT32_MAX)
					b_replicas = UINT32_MAX;
				else
					b_replicas = (uint32_t) // flint
						(replicas * task->weight);

				shardcfg_backend_add(&re, task->priv,
				    b_replicas);
				break;
			}

			const char * const ident = b->ident;

			shard_notice(vsl, shardd->name,
			    "backend %s%s%s already exists - skipping",
			    VRT_BACKEND_string(b->backend),
			    ident ? "/" : "",
			    ident ? ident : "");
			break;
		case REMOVE_BE:
			shardcfg_backend_del(&re, task->priv);
			break;
		default:
			INCOMPL();
		}
	}
	shardcfg_backend_finalize(&re);
}
607
608
/*
609
 * ============================================================
610
 * top reconfiguration function
611
 */
612
613
static VCL_BOOL
614 2360
change_reconfigure(VRT_CTX, struct shard_change *change, VCL_INT replicas)
615
{
616
        struct sharddir *shardd;
617
618 2360
        CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);
619 2360
        assert (replicas > 0);
620 2360
        shardd = change->shardd;
621 2360
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
622
623 2360
        if (VSTAILQ_FIRST(&change->tasks) == NULL)
624 760
                return (1);
625
626 1600
        sharddir_wrlock(shardd);
627
628 1600
        shardcfg_apply_change(ctx->vsl, shardd, change, replicas);
629 1600
        shard_change_finish(change);
630
631 1600
        if (shardd->hashcircle)
632 720
                free(shardd->hashcircle);
633 1600
        shardd->hashcircle = NULL;
634
635 1600
        if (shardd->n_backend == 0) {
636 160
                shard_err0(ctx->vsl, shardd->name,
637
                    ".reconfigure() no backends");
638 160
                sharddir_unlock(shardd);
639 160
                return (0);
640
        }
641
642 1440
        shardcfg_hashcircle(shardd);
643 1440
        sharddir_unlock(shardd);
644 1440
        return (1);
645 2360
}
646
647
VCL_BOOL
648 1680
shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT replicas)
649
{
650
        struct shard_change *change;
651
652 1680
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
653 1680
        if (replicas <= 0) {
654 80
                shard_err(ctx->vsl, shardd->name,
655
                    ".reconfigure() invalid replicas argument %ld", replicas);
656 80
                return (0);
657
        }
658
659 1600
        change = shard_change_get(ctx, shardd);
660 1600
        if (change == NULL)
661 0
                return (0);
662
663 1600
        return (change_reconfigure(ctx, change, replicas));
664 1680
}
665
666
/*
667
 * ============================================================
668
 * misc config related
669
 */
670
671
/* only for sharddir_delete() */
672
void
673 200
shardcfg_delete(const struct sharddir *shardd)
674
{
675
676 200
        AZ(shardd->n_backend);
677 200
        if (shardd->backend)
678 0
                free(shardd->backend);
679 200
        if (shardd->hashcircle)
680 0
                free(shardd->hashcircle);
681 200
}
682
683
/* Set the director-wide warmup ratio (probability of picking the
 * alternative backend), 0 <= ratio < 1. Takes the write lock. */
VCL_VOID
shardcfg_set_warmup(struct sharddir *shardd, VCL_REAL ratio)
{
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(ratio >= 0 && ratio < 1);
	sharddir_wrlock(shardd);
	shardd->warmup = ratio;
	sharddir_unlock(shardd);
}
692
693
/* Set the director-wide default rampup duration (used for backends
 * without their own rampup). Takes the write lock. */
VCL_VOID
shardcfg_set_rampup(struct sharddir *shardd, VCL_DURATION duration)
{
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	assert(duration >= 0);
	sharddir_wrlock(shardd);
	shardd->rampup_duration = duration;
	sharddir_unlock(shardd);
}
702
703
VCL_DURATION
704 6480
shardcfg_get_rampup(const struct sharddir *shardd, unsigned host)
705
{
706
        VCL_DURATION r;
707
708 6480
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
709
        // assert sharddir_rdlock_held(shardd);
710 6480
        assert (host < shardd->n_backend);
711
712 6480
        if (isnan(shardd->backend[host].rampup))
713 6320
                r = shardd->rampup_duration;
714
        else
715 160
                r = shardd->backend[host].rampup;
716
717 6480
        return (r);
718
}