#include "ipc.h"
#include "daemonize.h"

-
struct process_group {
	enum process_type type;
	struct socket_info *si_filter;
	fork_new_process_f *fork_func;
	terminate_process_f *term_func;
-	unsigned int max_procs;
-	unsigned int min_procs;
-	/* some reference to a profile to give us params for fork/rip procs */
+	struct scaling_profile *prof;
	unsigned char history_size;
+	unsigned char *history_map;
	unsigned char history_idx;
	unsigned short no_downscale_cycles;
-	unsigned char *history_map;
	struct process_group *next;
};

-#define PG_HISTORY_DEFAULT_SIZE 5 /*to be replaced with val from profile*/
-#define PG_HIGH_MIN_SCORE 4 /*to be replaced with val from profile*/
-#define PG_HLOAD_TRESHOLD 50 /*to be replaced with val from profile*/
-#define PG_LLOAD_TRESHOLD 20 /*to be replaced with val from profile*/
+static struct process_group *pg_head = NULL;
+
+static struct scaling_profile *profiles_head = NULL;
+
+
+
+int create_auto_scaling_profile( char *name,
+		unsigned int max_procs, unsigned int up_threshold,
+		unsigned int up_cycles_needed, unsigned int up_cycles_tocheck,
+		unsigned int min_procs, unsigned int down_threshold,
+		unsigned int down_cycles_tocheck, unsigned short down_cycles_delay)
+{
+	struct scaling_profile *p;
+
+	p = (struct scaling_profile*)pkg_malloc( sizeof(struct scaling_profile) +
+		strlen(name) + 1 );
+	if (p==NULL) {
+		LM_ERR("failed to allocate memory for a new auto-scaling profile\n");
+		return -1;
+	}
+
+	/* not really needed, more to be safe for future expansions */
+	memset( p, 0, sizeof(struct scaling_profile));
+
+	p->max_procs = max_procs;
+	p->up_threshold = up_threshold;
+	p->up_cycles_needed = up_cycles_needed;
+	p->up_cycles_tocheck = up_cycles_tocheck;
+	p->min_procs = min_procs;
+	p->down_threshold = down_threshold;
+	p->down_cycles_tocheck = down_cycles_tocheck;
+	p->down_cycles_delay = down_cycles_delay;
+	p->name = (char*)(p+1);
+	strcpy( p->name, name);
+
+	p->next = profiles_head;
+	profiles_head = p;
+
+	return 0;
+}
+
+
+struct scaling_profile *get_scaling_profile(char *name)
+{
+	struct scaling_profile *p;
+
+	for ( p=profiles_head ; p ; p=p->next )
+		if (strcasecmp(name, p->name)==0)
+			return p;
+
+	return NULL;
+}

-struct process_group *pg_head = NULL;

int create_process_group(enum process_type type,
-		struct socket_info *si_filter,
-		unsigned int min_procs, unsigned int max_procs,
-		fork_new_process_f *f1, terminate_process_f *f2)
+		struct socket_info *si_filter, struct scaling_profile *prof,
+		fork_new_process_f *f1, terminate_process_f *f2)
{
	struct process_group *pg, *it;
+	int h_size;
+
+	/* how much of a history do we need in order to cover both up and down
+	 * transitions? */
+	h_size = (prof->up_cycles_tocheck > prof->down_cycles_tocheck) ?
+		prof->up_cycles_tocheck : prof->down_cycles_tocheck;

	pg = (struct process_group*)shm_malloc( sizeof(struct process_group) +
-		sizeof(char)*PG_HISTORY_DEFAULT_SIZE );
+		sizeof(char)*h_size );
	if (pg==NULL) {
		LM_ERR("failed to allocate memory for a new process group\n");
		return -1;
	}
-	memset( pg, 0, sizeof(struct process_group) +
-		sizeof(char)*PG_HISTORY_DEFAULT_SIZE );
+	memset( pg, 0, sizeof(struct process_group) + sizeof(char)*h_size );

	LM_DBG("registering group of processes type %d, socket filter %p, "
-		"process range [%d,%d]\n", type, si_filter, min_procs, max_procs);
+		"scaling profile <%s>\n", type, si_filter, prof->name);

	pg->type = type;
	pg->si_filter = si_filter;
-	pg->max_procs = max_procs;
-	pg->min_procs = min_procs;
+	pg->prof = prof;
	pg->fork_func = f1;
	pg->term_func = f2;
	pg->next = NULL;

-	pg->history_size = PG_HISTORY_DEFAULT_SIZE;
+	pg->history_size = h_size;
	pg->history_map = (unsigned char*)(pg+1);
	pg->history_idx = 0;
-	pg->no_downscale_cycles = 10*PG_HISTORY_DEFAULT_SIZE;
+	pg->no_downscale_cycles = pg->prof->down_cycles_delay;

	/* add at the end of list, to avoid changing the head of the list due
	 * to forking */
@@ -108,12 +155,12 @@ void rescale_group_history(struct process_group *pg, unsigned int idx,
		LM_DBG("rescaling old %d to %d [idx %d]\n",
			old, pg->history_map[k], k);

-		k = k ? (k-1) : (PG_HISTORY_DEFAULT_SIZE-1);
+		k = k ? (k-1) : (pg->history_size-1);
	} while(k!=idx);
}


-void check_and_adjust_number_of_workers(void)
+void do_workers_auto_scaling(void)
{
	struct process_group *pg;
	unsigned int i, k, idx;
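
A note on the history handling (reader's aid, not part of the diff): history_map is a ring buffer of per-process load averages, and since the up and down decisions may look back over different numbers of cycles, it is now sized to the larger of the two windows; with the illustrative profile above (4 up-cycles vs 5 down-cycles) that means a 5-entry ring. rescale_group_history() keeps the stored samples comparable when the group size changes, e.g. a per-process average of 60% spread over 4 workers corresponds to roughly 60*4/5 = 48% once a fifth worker is forked.
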
@@ -142,7 +189,7 @@ void check_and_adjust_number_of_workers(void)
		}

		/* set the current value */
-		idx = (pg->history_idx+1)%PG_HISTORY_DEFAULT_SIZE;
+		idx = (pg->history_idx+1)%pg->history_size;
		pg->history_map[idx] = (unsigned char) ( load / procs_no );

		LM_DBG("group %d (with %d procs) has average load of %d\n",
@@ -152,20 +199,24 @@ void check_and_adjust_number_of_workers(void)
		cnt_over = 0;
		cnt_under = 0;
		k = idx;
+		i = 1;
		do {
-			if (pg->history_map[k]>PG_HLOAD_TRESHOLD)
+			if ( pg->history_map[k] > pg->prof->up_threshold &&
+			i <= pg->prof->up_cycles_tocheck )
				cnt_over++;
-			else if (pg->history_map[k]<PG_LLOAD_TRESHOLD)
+			else if ( pg->history_map[k] < pg->prof->down_threshold &&
+			i <= pg->prof->down_cycles_tocheck )
				cnt_under++;

-			k = k ? (k-1) : (PG_HISTORY_DEFAULT_SIZE-1);
+			i++;
+			k = k ? (k-1) : (pg->history_size-1);
		} while(k!=idx);

		/* decide what to do */
-		if (cnt_over>=PG_HIGH_MIN_SCORE) {
-			if (procs_no<pg->max_procs) {
+		if ( cnt_over >= pg->prof->up_cycles_needed ) {
+			if ( procs_no < pg->prof->max_procs ) {
				LM_NOTICE("score %d/%d -> forking new proc in group %d "
-					"(with %d procs)\n", cnt_over, PG_HISTORY_DEFAULT_SIZE,
+					"(with %d procs)\n", cnt_over, pg->prof->up_cycles_tocheck,
					pg->type, procs_no);
				/* we need to fork one more process here */
				if ( (p_id=pg->fork_func(pg->si_filter))<0 ||
@@ -174,25 +225,28 @@ void check_and_adjust_number_of_workers(void)
					"(current %d procs)\n",pg->type,procs_no);
			} else {
				rescale_group_history( pg, idx, procs_no, +1);
-				pg->no_downscale_cycles = 10*PG_HISTORY_DEFAULT_SIZE;
+				pg->no_downscale_cycles = pg->prof->down_cycles_delay;
			}
		}
-	} else if (cnt_under==PG_HISTORY_DEFAULT_SIZE) {
-		if (procs_no>pg->min_procs && procs_no!=1 &&
+	} else if ( pg->prof->down_cycles_tocheck != 0 &&
+	cnt_under == pg->prof->down_cycles_tocheck ) {
+		if ( procs_no > pg->prof->min_procs &&
		pg->no_downscale_cycles==0 ) {
			/* try to estimate the load after downscaling */
			load = 0;
			k = idx;
+			i = 0;
			do {
				load += pg->history_map[k];
-				k = k ? (k-1) : (PG_HISTORY_DEFAULT_SIZE-1);
-			} while(k!=idx);
-			load = (load*procs_no) / (procs_no-1);
-			if (load<PG_HLOAD_TRESHOLD) {
+				k = k ? (k-1) : (pg->history_size-1);
+			} while ( k!=idx && i<=pg->prof->down_cycles_tocheck );
+			load = (load*procs_no) /
+				(pg->prof->down_cycles_tocheck * (procs_no-1));
+			if ( load < pg->prof->up_threshold ) {
				/* down scale one more process here */
-				LM_DBG("score %d/%d -> ripping one proc from group %d "
+				LM_NOTICE("score %d/%d -> ripping one proc from group %d "
					"(with %d procs), estimated load -> %d\n", cnt_under,
-					PG_HISTORY_DEFAULT_SIZE, pg->type, procs_no,
+					pg->prof->down_cycles_tocheck, pg->type, procs_no,
					load);
				ipc_send_rpc( last_idx_in_pg, pg->term_func, NULL);
			}
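
To make the new down-scaling estimate concrete (illustrative numbers only, reusing the hypothetical profile above with down_threshold 18, down_cycles_tocheck 5 and up_threshold 70): suppose 5 workers whose last 5 per-process averages were 15, 12, 10, 14 and 9. Every sample is below 18, so cnt_under reaches 5, and once no_downscale_cycles has counted down to 0 the projected per-process load for 4 workers is (15+12+10+14+9)*5 / (5*4) = 300/20 = 15, still far under up_threshold, so one process is asked to terminate via ipc_send_rpc(). Had the projection come out at 70 or above, the group would have been left as is.
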