17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #ifdef TEAMWARE_MAKE_CMN
27
28 /*
29 * parallel.cc
30 *
31 * Deal with the parallel processing
32 */
33
34 /*
35 * Included files
36 */
37 #ifdef DISTRIBUTED
38 #include <avo/strings.h> /* AVO_STRDUP() */
39 #include <dm/Avo_DoJobMsg.h>
40 #include <dm/Avo_MToolJobResultMsg.h>
41 #endif
42 #include <errno.h> /* errno */
43 #include <fcntl.h>
44 #include <mk/defs.h>
45 #include <mksh/dosys.h> /* redirect_io() */
46 #include <mksh/macro.h> /* expand_value() */
47 #include <mksh/misc.h> /* getmem() */
48 #include <sys/signal.h>
49 #include <sys/stat.h>
50 #include <sys/types.h>
51 #include <sys/utsname.h>
52 #include <sys/wait.h>
53 #include <unistd.h>
54 #include <netdb.h>
55
56
57
58 /*
59 * Defined macros
60 */
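/*
 * Upper bound on the number of command lines collected for one target;
 * execute_parallel() falls back to serial execution (build_serial) when
 * a rule set reaches this limit.
 */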
61 #define MAXRULES 100
72
73
74 /*
75 * Static variables
76 */
77 #ifdef TEAMWARE_MAKE_CMN
78 static Boolean just_did_subtree = false;
79 static char local_host[MAXNAMELEN] = "";
80 static char user_name[MAXNAMELEN] = "";
81 #endif
82 static int pmake_max_jobs = 0;
83 static pid_t process_running = -1;
84 static Running *running_tail = &running_list;
85 static Name subtree_conflict;
86 static Name subtree_conflict2;
87
88
89 /*
90 * File table of contents
91 */
92 #ifdef DISTRIBUTED
93 static void append_dmake_cmd(Avo_DoJobMsg *dmake_job_msg, char *orig_cmd_line, int cmd_options);
94 static void append_job_result_msg(Avo_MToolJobResultMsg *msg, char *outFn, char *errFn);
95 static void send_job_result_msg(Running rp);
96 #endif
97 static void delete_running_struct(Running rp);
98 static Boolean dependency_conflict(Name target);
99 static Doname distribute_process(char **commands, Property line);
100 static void doname_subtree(Name target, Boolean do_get, Boolean implicit);
101 static void dump_out_file(char *filename, Boolean err);
102 static void finish_doname(Running rp);
103 static void maybe_reread_make_state(void);
104 static void process_next(void);
105 static void reset_conditionals(int cnt, Name *targets, Property *locals);
106 static pid_t run_rule_commands(char *host, char **commands);
107 static Property *set_conditionals(int cnt, Name *targets);
108 static void store_conditionals(Running rp);
109
110
111 /*
112 * execute_parallel(line, waitflg, local)
113 *
114 * DMake 2.x:
115 * parallel mode: spawns a parallel process to execute the command group.
116 * distributed mode: sends the command group down the pipe to rxm.
117 *
118 * Return value:
119 * The result of the execution: build_running if a job was spawned,
 * build_ok if there was nothing to execute, build_serial if the
 * command group must fall back to serial execution, or the result of
 * a completed execution when the caller waits.
120 *
121 * Parameters:
122 * line The command group to execute
 * waitflg If true, execute and wait for the commands before returning
 * local If true, force the job to run on the local host
123 */
124 Doname
125 execute_parallel(Property line, Boolean waitflg, Boolean local)
126 {
127 int argcnt;
128 int cmd_options = 0;
129 char *commands[MAXRULES + 5];
130 char *cp;
131 #ifdef DISTRIBUTED
132 Avo_DoJobMsg *dmake_job_msg = NULL;
133 #endif
134 Name dmake_name;
135 Name dmake_value;
136 int ignore;
137 Name make_machines_name;
138 char **p;
139 Property prop;
140 Doname result = build_ok;
141 Cmd_line rule;
142 Boolean silent_flag;
143 Name target = line->body.line.target;
144 Boolean wrote_state_file = false;
145
146 if ((pmake_max_jobs == 0) &&
147 (dmake_mode_type == parallel_mode)) {
148 if (local_host[0] == '\0') {
149 (void) gethostname(local_host, MAXNAMELEN);
150 }
151 MBSTOWCS(wcs_buffer, NOCATGETS("DMAKE_MAX_JOBS"));
152 dmake_name = GETNAME(wcs_buffer, FIND_LENGTH);
153 if (((prop = get_prop(dmake_name->prop, macro_prop)) != NULL) &&
160 }
161 } else {
162 /*
163 * For backwards compatibility w/ PMake 1.x, when
164 * DMake 2.x is being run in parallel mode, DMake
165 * should parse the PMake startup file
166 * $(HOME)/.make.machines to get the pmake_max_jobs.
167 */
168 MBSTOWCS(wcs_buffer, NOCATGETS("PMAKE_MACHINESFILE"));
169 dmake_name = GETNAME(wcs_buffer, FIND_LENGTH);
170 if (((prop = get_prop(dmake_name->prop, macro_prop)) != NULL) &&
171 ((dmake_value = prop->body.macro.value) != NULL)) {
172 make_machines_name = dmake_value;
173 } else {
174 make_machines_name = NULL;
175 }
176 if ((pmake_max_jobs = read_make_machines(make_machines_name)) <= 0) {
177 pmake_max_jobs = PMAKE_DEF_MAX_JOBS;
178 }
179 }
180 #ifdef DISTRIBUTED
181 if (send_mtool_msgs) {
182 send_rsrc_info_msg(pmake_max_jobs, local_host, user_name);
183 }
184 #endif
185 }
186
187 if ((dmake_mode_type == serial_mode) ||
188 ((dmake_mode_type == parallel_mode) && (waitflg))) {
189 return (execute_serial(line));
190 }
191
192 #ifdef DISTRIBUTED
193 if (dmake_mode_type == distributed_mode) {
194 if(local) {
195 // return (execute_serial(line));
196 waitflg = true;
197 }
198 dmake_job_msg = new Avo_DoJobMsg();
199 dmake_job_msg->setJobId(++job_msg_id);
200 dmake_job_msg->setTarget(target->string_mb);
201 dmake_job_msg->setImmediateOutput(0);
202 called_make = false;
203 } else
204 #endif
205 {
206 p = commands;
207 }
208
209 argcnt = 0;
210 for (rule = line->body.line.command_used;
211 rule != NULL;
212 rule = rule->next) {
213 if (posix && (touch || quest) && !rule->always_exec) {
214 continue;
215 }
216 if (vpath_defined) {
217 rule->command_line =
218 vpath_translation(rule->command_line);
219 }
220 if (dmake_mode_type == distributed_mode) {
221 cmd_options = 0;
222 if(local) {
223 cmd_options |= local_host_mask;
224 }
225 } else {
226 silent_flag = false;
227 ignore = 0;
228 }
229 if (rule->command_line->hash.length > 0) {
230 if (++argcnt == MAXRULES) {
231 if (dmake_mode_type == distributed_mode) {
232 /* XXX - tell rxm to execute on local host. */
233 /* I WAS HERE!!! */
234 } else {
235 /* Too many rules, run serially instead. */
236 return build_serial;
237 }
238 }
239 #ifdef DISTRIBUTED
240 if (dmake_mode_type == distributed_mode) {
241 /*
242 * XXX - set assign_mask to tell rxm
243 * to do the following.
244 */
245 /* From execute_serial():
246 if (rule->assign) {
247 result = build_ok;
248 do_assign(rule->command_line, target);
249 */
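/*
 * The empty if (0) arm below stands in for the do_assign() case quoted
 * above; macro assignment lines are not handled on this distributed path.
 */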
250 if (0) {
251 } else if (report_dependencies_level == 0) {
252 if (rule->ignore_error) {
253 cmd_options |= ignore_mask;
254 }
255 if (rule->silent) {
256 cmd_options |= silent_mask;
257 }
258 if (rule->command_line->meta) {
259 cmd_options |= meta_mask;
260 }
261 if (rule->make_refd) {
262 cmd_options |= make_refd_mask;
263 }
264 if (do_not_exec_rule) {
265 cmd_options |= do_not_exec_mask;
266 }
267 append_dmake_cmd(dmake_job_msg,
268 rule->command_line->string_mb,
269 cmd_options);
270 /* Copying dosys()... */
271 if (rule->make_refd) {
272 if (waitflg) {
273 dmake_job_msg->setImmediateOutput(1);
274 }
275 called_make = true;
276 if (command_changed &&
277 !wrote_state_file) {
278 write_state_file(0, false);
279 wrote_state_file = true;
280 }
281 }
282 }
283 } else
284 #endif
285 {
286 if (rule->silent && !silent) {
287 silent_flag = true;
288 }
289 if (rule->ignore_error) {
290 ignore++;
291 }
292 /* XXX - need to add support for + prefix */
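/*
 * Commands that need an '@' (silent) or '-' (ignore errors) prefix are
 * copied into freshly allocated memory; the prefix character is also
 * what later identifies these strings as the ones to free after
 * distribute_process() returns.
 */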
293 if (silent_flag || ignore) {
294 *p = getmem((silent_flag ? 1 : 0) +
295 ignore +
296 (strlen(rule->
297 command_line->
298 string_mb)) +
299 1);
300 cp = *p++;
301 if (silent_flag) {
302 *cp++ = (int) at_char;
303 }
304 if (ignore) {
305 *cp++ = (int) hyphen_char;
306 }
307 (void) strcpy(cp, rule->command_line->string_mb);
308 } else {
309 *p++ = rule->command_line->string_mb;
310 }
311 }
312 }
313 }
314 if ((argcnt == 0) ||
315 (report_dependencies_level > 0)) {
316 #ifdef DISTRIBUTED
317 if (dmake_job_msg) {
318 delete dmake_job_msg;
319 }
320 #endif
321 return build_ok;
322 }
323 #ifdef DISTRIBUTED
324 if (dmake_mode_type == distributed_mode) {
325 // Send a DoJob message to the rxm process.
326 distribute_rxm(dmake_job_msg);
327
328 // Wait for an acknowledgement.
329 Avo_AcknowledgeMsg *ackMsg = getAcknowledgeMsg();
330 if (ackMsg) {
331 delete ackMsg;
332 }
333
334 if (waitflg) {
335 // Wait for, and process a job result.
336 result = await_dist(waitflg);
337 if (called_make) {
338 maybe_reread_make_state();
339 }
340 check_state(temp_file_name);
341 if (result == build_failed) {
342 if (!continue_after_error) {
343
344 #ifdef PRINT_EXIT_STATUS
345 warning(NOCATGETS("I'm in execute_parallel. await_dist() returned build_failed"));
346 #endif
347
348 fatal(catgets(catd, 1, 252, "Command failed for target `%s'"),
349 target->string_mb);
350 }
351 /*
352 * Make sure a failing command is not
353 * saved in .make.state.
354 */
355 line->body.line.command_used = NULL;
356 }
357 if (temp_file_name != NULL) {
358 free_name(temp_file_name);
359 }
360 temp_file_name = NULL;
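/*
 * Drop the SUNPRO_DEPENDENCIES macro and environment values accumulated
 * for this job, presumably so they do not carry over into the next
 * distributed job.
 */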
361 Property spro = get_prop(sunpro_dependencies->prop, macro_prop);
362 if(spro != NULL) {
363 Name val = spro->body.macro.value;
364 if(val != NULL) {
365 free_name(val);
366 spro->body.macro.value = NULL;
367 }
368 }
369 spro = get_prop(sunpro_dependencies->prop, env_mem_prop);
370 if(spro) {
371 char *val = spro->body.env_mem.value;
372 if(val != NULL) {
373 retmem_mb(val);
374 spro->body.env_mem.value = NULL;
375 }
376 }
377 return result;
378 } else {
379 parallel_process_cnt++;
380 return build_running;
381 }
382 } else
383 #endif
384 {
385 *p = NULL;
386
387 Doname res = distribute_process(commands, line);
388 if (res == build_running) {
389 parallel_process_cnt++;
390 }
391
392 /*
393 * Return (free) only the memory that was specially allocated
394 * for parts of the commands above.
395 */
396 for (int i = 0; commands[i] != NULL; i++) {
397 if ((commands[i][0] == (int) at_char) ||
398 (commands[i][0] == (int) hyphen_char)) {
399 retmem_mb(commands[i]);
400 }
401 }
402 return res;
403 }
404 }
405
406 #ifdef DISTRIBUTED
407 /*
408 * append_dmake_cmd()
409 *
410 * Replaces all escaped newlines (\<cr>) in the original
411 * command line with spaces, then appends the new command line
412 * to the DoJobMsg object.
413 */
414 static void
415 append_dmake_cmd(Avo_DoJobMsg *dmake_job_msg,
416 char *orig_cmd_line,
417 int cmd_options)
418 {
419 /*
420 Avo_DmakeCommand *tmp_dmake_command;
421
422 tmp_dmake_command = new Avo_DmakeCommand(orig_cmd_line, cmd_options);
423 dmake_job_msg->appendCmd(tmp_dmake_command);
424 delete tmp_dmake_command;
425 */
426 dmake_job_msg->appendCmd(new Avo_DmakeCommand(orig_cmd_line, cmd_options));
427 }
428 #endif
429
430 #ifdef TEAMWARE_MAKE_CMN
431 #define MAXJOBS_ADJUST_RFE4694000
432
433 #ifdef MAXJOBS_ADJUST_RFE4694000
434
435 #include <unistd.h> /* sysconf(_SC_NPROCESSORS_ONLN) */
436 #include <sys/ipc.h> /* ftok() */
437 #include <sys/shm.h> /* shmget(), shmat(), shmdt(), shmctl() */
438 #include <semaphore.h> /* sem_init(), sem_trywait(), sem_post(), sem_destroy() */
439 #include <sys/loadavg.h> /* getloadavg() */
440
441 /*
442 * adjust_pmake_max_jobs (int pmake_max_jobs)
443 *
444 * Parameters:
445 * pmake_max_jobs - max jobs limit set by user
446 *
447 * External functions used:
448 * sysconf()
778 }
779 }
780 if (res < 0) {
781 /* job adjustment error */
782 job_adjust_error();
783
784 /* no adjustment */
785 while (parallel_process_cnt >= pmake_max_jobs) {
786 await_parallel(false);
787 finish_children(true);
788 }
789 }
790 break;
791 default:
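/*
 * Default: no adjustment; wait for running children to finish until
 * a job slot frees up under pmake_max_jobs.
 */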
792 while (parallel_process_cnt >= pmake_max_jobs) {
793 await_parallel(false);
794 finish_children(true);
795 }
796 }
797 #endif /* TEAMWARE_MAKE_CMN && MAXJOBS_ADJUST_RFE4694000 */
798 #ifdef DISTRIBUTED
799 if (send_mtool_msgs) {
800 send_job_start_msg(line);
801 }
802 #endif
803 #ifdef DISTRIBUTED
804 setvar_envvar((Avo_DoJobMsg *)NULL);
805 #else
806 setvar_envvar();
807 #endif
808 /*
809 * Tell the user what DMake is doing.
810 */
811 if (!silent && output_mode != txt2_mode) {
812 /*
813 * Print local_host --> x job(s).
814 */
815 (void) fprintf(stdout,
816 catgets(catd, 1, 325, "%s --> %d %s\n"),
817 local_host,
818 parallel_process_cnt + 1,
819 (parallel_process_cnt == 0) ? catgets(catd, 1, 124, "job") : catgets(catd, 1, 125, "jobs"));
820
821 /* Print command line(s). */
822 tmp_index = 0;
823 while (commands[tmp_index] != NULL) {
824 /* No @ char. */
825 /* XXX - need to add [2] when + prefix is added */
826 if ((commands[tmp_index][0] != (int) at_char) &&
827 (commands[tmp_index][1] != (int) at_char)) {
928 }
929 target->checking_subtree = false;
930 running_list = save_running_list;
931 running_tail = save_running_tail;
932 }
933
934 /*
935 * finish_running()
936 *
937 * Keeps processing until the running_list is emptied out.
938 *
939 * Parameters:
940 *
941 * Global variables used:
942 * running_list The list of running processes
943 */
944 void
945 finish_running(void)
946 {
947 while (running_list != NULL) {
948 #ifdef DISTRIBUTED
949 if (dmake_mode_type == distributed_mode) {
950 if ((just_did_subtree) ||
951 (parallel_process_cnt == 0)) {
952 just_did_subtree = false;
953 } else {
954 (void) await_dist(false);
955 finish_children(true);
956 }
957 } else
958 #endif
959 {
960 await_parallel(false);
961 finish_children(true);
962 }
963 if (running_list != NULL) {
964 process_next();
965 }
966 }
967 }
968
969 /*
970 * process_next()
971 *
972 * Searches the running list for any targets which can start processing.
973 * This can be a pending target, a serial target, or a subtree target.
974 *
975 * Parameters:
976 *
977 * Static variables used:
978 * running_tail The end of the list of running procs
1102 }
1103 /*
1104 * If nothing has been found to build and there exists a subtree
1105 * target with no dependency conflicts, build it.
1106 */
1107 if (quiescent) {
1108 start_loop_3:
1109 for (rp_prev = &running_list, rp = running_list;
1110 rp != NULL;
1111 rp = rp->next) {
1112 if (rp->state == build_subtree) {
1113 if (!dependency_conflict(rp->target)) {
1114 *rp_prev = rp->next;
1115 if (rp->next == NULL) {
1116 running_tail = rp_prev;
1117 }
1118 recursion_level = rp->recursion_level;
1119 doname_subtree(rp->target,
1120 rp->do_get,
1121 rp->implicit);
1122 #ifdef DISTRIBUTED
1123 just_did_subtree = true;
1124 #endif
1125 quiescent = false;
1126 delete_running_struct(rp);
1127 goto start_loop_3;
1128 } else {
1129 subtree_target = rp_prev;
1130 rp_prev = &rp->next;
1131 }
1132 } else {
1133 rp_prev = &rp->next;
1134 }
1135 }
1136 }
1137 /*
1138 * If there is still nothing to build, we either have a deadlock,
1139 * or a subtree target has a dependency conflict with something
1140 * that is waiting to build.
1141 */
1142 if (quiescent) {
1143 if (subtree_target == NULL) {
1144 fatal(catgets(catd, 1, 126, "Internal error: deadlock detected in process_next"));
1145 } else {
1146 rp = *subtree_target;
1147 if (debug_level > 0) {
1148 warning(catgets(catd, 1, 127, "Conditional macro conflict encountered for %s between %s and %s"),
1149 subtree_conflict2->string_mb,
1150 rp->target->string_mb,
1151 subtree_conflict->string_mb);
1152 }
1153 *subtree_target = (*subtree_target)->next;
1154 if (rp->next == NULL) {
1155 running_tail = subtree_target;
1156 }
1157 recursion_level = rp->recursion_level;
1158 doname_subtree(rp->target, rp->do_get, rp->implicit);
1159 #ifdef DISTRIBUTED
1160 just_did_subtree = true;
1161 #endif
1162 delete_running_struct(rp);
1163 }
1164 }
1165 }
1166
1167 /*
1168 * set_conditionals(cnt, targets)
1169 *
1170 * Sets the conditional macros for the targets given in the array of
1171 * targets. The old macro values are returned in an array of
1172 * Properties for later resetting.
1173 *
1174 * Return value:
1175 * Array of conditional macro settings
1176 *
1177 * Parameters:
1178 * cnt Number of targets
1179 * targets Array of targets
1180 */
1181 static Property *
1413 * finished building.
1414 * In parallel_mode, output the accumulated stdout/stderr.
1415 * Read the auto dependency stuff, handle a failed build,
1416 * update the target, then finish the doname process for
1417 * that target.
1418 */
1419 if (rp->state == build_ok || rp->state == build_failed) {
1420 *rp_prev = rp->next;
1421 if (rp->next == NULL) {
1422 running_tail = rp_prev;
1423 }
1424 if ((line2 = rp->command) == NULL) {
1425 line2 = get_prop(rp->target->prop, line_prop);
1426 }
1427 if (dmake_mode_type == distributed_mode) {
1428 if (rp->make_refd) {
1429 maybe_reread_make_state();
1430 }
1431 } else {
1432 /*
1433 * Send an Avo_MToolJobResultMsg to maketool.
1434 */
1435 #ifdef DISTRIBUTED
1436 if (send_mtool_msgs) {
1437 send_job_result_msg(rp);
1438 }
1439 #endif
1440 /*
1441 * Check if there was any job output
1442 * from the parallel build.
1443 */
1444 if (rp->stdout_file != NULL) {
1445 if (stat(rp->stdout_file, &out_buf) < 0) {
1446 fatal(catgets(catd, 1, 130, "stat of %s failed: %s"),
1447 rp->stdout_file,
1448 errmsg(errno));
1449 }
1450 if ((line2 != NULL) &&
1451 (out_buf.st_size > 0)) {
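/*
 * Add up the lengths of the command lines (plus a newline each); if the
 * captured stdout differs from just the echoed commands, or the commands
 * were not echoed at all, the job apparently produced real output that
 * should be shown.
 */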
1452 cmds_length = 0;
1453 for (rule = line2->body.line.command_used,
1454 silent_flag = silent;
1455 rule != NULL;
1456 rule = rule->next) {
1457 cmds_length += rule->command_line->hash.length + 1;
1458 silent_flag = BOOLEAN(silent_flag || rule->silent);
1459 }
1460 if (out_buf.st_size != cmds_length || silent_flag ||
1728 Running rp;
1729 Name *p;
1730
1731 rp = new_running_struct();
1732 rp->state = build_running;
1733 rp->target = target;
1734 rp->true_target = true_target;
1735 rp->command = command;
1736 rp->recursion_level = recursion_level;
1737 rp->do_get = do_get;
1738 rp->implicit = implicit;
1739 rp->auto_count = auto_count;
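/* Take a private copy of the automatic dependency names for this job. */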
1740 if (auto_count > 0) {
1741 rp->automatics = (Name *) getmem(auto_count * sizeof (Name));
1742 for (p = rp->automatics; auto_count > 0; auto_count--) {
1743 *p++ = *automatics++;
1744 }
1745 } else {
1746 rp->automatics = NULL;
1747 }
1748 #ifdef DISTRIBUTED
1749 if (dmake_mode_type == distributed_mode) {
1750 rp->make_refd = called_make;
1751 called_make = false;
1752 } else
1753 #endif
1754 {
1755 rp->pid = process_running;
1756 process_running = -1;
1757 childPid = -1;
1758 }
1759 rp->job_msg_id = job_msg_id;
1760 rp->stdout_file = stdout_file;
1761 rp->stderr_file = stderr_file;
1762 rp->temp_file = temp_file_name;
1763 rp->redo = false;
1764 rp->next = NULL;
1765 store_conditionals(rp);
1766 stdout_file = NULL;
1767 stderr_file = NULL;
1768 temp_file_name = NULL;
1769 current_target = NULL;
1770 current_line = NULL;
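/*
 * Append rp at the tail of running_list; running_tail always points
 * at the terminating next pointer.
 */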
1771 *running_tail = rp;
1772 running_tail = &rp->next;
1773 }
2092 (void) exists(make_state);
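/* Nothing to re-read if .make.state has not changed since it was last recorded. */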
2093 if (make_state_before == make_state->stat.time) {
2094 return;
2095 }
2096 makefile_type = reading_statefile;
2097 if (read_trace_level > 1) {
2098 trace_reader = true;
2099 }
2100 temp_file_number++;
2101 (void) read_simple_file(make_state,
2102 false,
2103 false,
2104 false,
2105 false,
2106 false,
2107 true);
2108 trace_reader = false;
2109 }
2110 }
2111
2112 #ifdef DISTRIBUTED
2113 /*
2114 * Create and send an Avo_MToolJobResultMsg.
2115 */
2116 static void
2117 send_job_result_msg(Running rp)
2118 {
2119 Avo_MToolJobResultMsg *msg;
2120 RWCollectable *xdr_msg;
2121
2122 msg = new Avo_MToolJobResultMsg();
2123 msg->setResult(rp->job_msg_id,
2124 (rp->state == build_ok) ? 0 : 1,
2125 DONE);
2126 append_job_result_msg(msg,
2127 rp->stdout_file,
2128 rp->stderr_file);
2129
2130 xdr_msg = (RWCollectable *)msg;
2131 xdr(get_xdrs_ptr(), xdr_msg);
2132 (void) fflush(get_mtool_msgs_fp());
2133
2134 delete msg;
2135 }
2136
2137 /*
2138 * Append the stdout/err to Avo_MToolJobResultMsg.
2139 */
2140 static void
2141 append_job_result_msg(Avo_MToolJobResultMsg *msg, char *outFn, char *errFn)
2142 {
2143 FILE *fp;
2144 char line[MAXPATHLEN];
2145
2146 fp = fopen(outFn, "r");
2147 if (fp == NULL) {
2148 /* Hmmm... what should we do here? */
2149 return;
2150 }
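/* Copy the job output line by line, stripping the trailing newline from each line. */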
2151 while (fgets(line, MAXPATHLEN, fp) != NULL) {
2152 if (line[strlen(line) - 1] == '\n') {
2153 line[strlen(line) - 1] = '\0';
2154 }
2155 msg->appendOutput(AVO_STRDUP(line));
2156 }
2157 (void) fclose(fp);
2158 }
2159 #endif
2160
2161 static void
2162 delete_running_struct(Running rp)
2163 {
2164 if ((rp->conditional_cnt > 0) &&
2165 (rp->conditional_targets != NULL)) {
2166 retmem_mb((char *) rp->conditional_targets);
2167 }
2168 /**/
2169 if ((rp->auto_count > 0) &&
2170 (rp->automatics != NULL)) {
2171 retmem_mb((char *) rp->automatics);
2172 }
2173 /**/
2174 if(rp->sprodep_value) {
2175 free_name(rp->sprodep_value);
2176 }
2177 if(rp->sprodep_env) {
2178 retmem_mb(rp->sprodep_env);
2179 }
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #ifdef TEAMWARE_MAKE_CMN
27
28 /*
29 * parallel.cc
30 *
31 * Deal with the parallel processing
32 */
33
34 /*
35 * Included files
36 */
37 #include <errno.h> /* errno */
38 #include <fcntl.h>
39 #include <mk/defs.h>
40 #include <mksh/dosys.h> /* redirect_io() */
41 #include <mksh/macro.h> /* expand_value() */
42 #include <mksh/misc.h> /* getmem() */
43 #include <sys/signal.h>
44 #include <sys/stat.h>
45 #include <sys/types.h>
46 #include <sys/utsname.h>
47 #include <sys/wait.h>
48 #include <unistd.h>
49 #include <netdb.h>
50
51
52
53 /*
54 * Defined macros
55 */
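/*
 * Upper bound on the number of command lines collected for one target;
 * execute_parallel() falls back to serial execution (build_serial) when
 * a rule set reaches this limit.
 */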
56 #define MAXRULES 100
67
68
69 /*
70 * Static variables
71 */
72 #ifdef TEAMWARE_MAKE_CMN
73 static Boolean just_did_subtree = false;
74 static char local_host[MAXNAMELEN] = "";
75 static char user_name[MAXNAMELEN] = "";
76 #endif
77 static int pmake_max_jobs = 0;
78 static pid_t process_running = -1;
79 static Running *running_tail = &running_list;
80 static Name subtree_conflict;
81 static Name subtree_conflict2;
82
83
84 /*
85 * File table of contents
86 */
87 static void delete_running_struct(Running rp);
88 static Boolean dependency_conflict(Name target);
89 static Doname distribute_process(char **commands, Property line);
90 static void doname_subtree(Name target, Boolean do_get, Boolean implicit);
91 static void dump_out_file(char *filename, Boolean err);
92 static void finish_doname(Running rp);
93 static void maybe_reread_make_state(void);
94 static void process_next(void);
95 static void reset_conditionals(int cnt, Name *targets, Property *locals);
96 static pid_t run_rule_commands(char *host, char **commands);
97 static Property *set_conditionals(int cnt, Name *targets);
98 static void store_conditionals(Running rp);
99
100
101 /*
102 * execute_parallel(line, waitflg, local)
103 *
104 * DMake 2.x:
105 * parallel mode: spawns a parallel process to execute the command group.
106 * distributed mode: sends the command group down the pipe to rxm.
107 *
108 * Return value:
109 * The result of the execution: build_running if a job was spawned,
 * build_ok if there was nothing to execute, build_serial if the
 * command group must fall back to serial execution, or the result of
 * a completed execution when the caller waits.
110 *
111 * Parameters:
112 * line The command group to execute
 * waitflg If true, execute and wait for the commands before returning
 * local If true, force the job to run on the local host
113 */
114 Doname
115 execute_parallel(Property line, Boolean waitflg, Boolean local)
116 {
117 int argcnt;
118 int cmd_options = 0;
119 char *commands[MAXRULES + 5];
120 char *cp;
121 Name dmake_name;
122 Name dmake_value;
123 int ignore;
124 Name make_machines_name;
125 char **p;
126 Property prop;
127 Doname result = build_ok;
128 Cmd_line rule;
129 Boolean silent_flag;
130 Name target = line->body.line.target;
131 Boolean wrote_state_file = false;
132
133 if ((pmake_max_jobs == 0) &&
134 (dmake_mode_type == parallel_mode)) {
135 if (local_host[0] == '\0') {
136 (void) gethostname(local_host, MAXNAMELEN);
137 }
138 MBSTOWCS(wcs_buffer, NOCATGETS("DMAKE_MAX_JOBS"));
139 dmake_name = GETNAME(wcs_buffer, FIND_LENGTH);
140 if (((prop = get_prop(dmake_name->prop, macro_prop)) != NULL) &&
147 }
148 } else {
149 /*
150 * For backwards compatibility w/ PMake 1.x, when
151 * DMake 2.x is being run in parallel mode, DMake
152 * should parse the PMake startup file
153 * $(HOME)/.make.machines to get the pmake_max_jobs.
154 */
155 MBSTOWCS(wcs_buffer, NOCATGETS("PMAKE_MACHINESFILE"));
156 dmake_name = GETNAME(wcs_buffer, FIND_LENGTH);
157 if (((prop = get_prop(dmake_name->prop, macro_prop)) != NULL) &&
158 ((dmake_value = prop->body.macro.value) != NULL)) {
159 make_machines_name = dmake_value;
160 } else {
161 make_machines_name = NULL;
162 }
163 if ((pmake_max_jobs = read_make_machines(make_machines_name)) <= 0) {
164 pmake_max_jobs = PMAKE_DEF_MAX_JOBS;
165 }
166 }
167 }
168
169 if ((dmake_mode_type == serial_mode) ||
170 ((dmake_mode_type == parallel_mode) && (waitflg))) {
171 return (execute_serial(line));
172 }
173
174 {
175 p = commands;
176 }
177
178 argcnt = 0;
179 for (rule = line->body.line.command_used;
180 rule != NULL;
181 rule = rule->next) {
182 if (posix && (touch || quest) && !rule->always_exec) {
183 continue;
184 }
185 if (vpath_defined) {
186 rule->command_line =
187 vpath_translation(rule->command_line);
188 }
189 if (dmake_mode_type == distributed_mode) {
190 cmd_options = 0;
191 if(local) {
192 cmd_options |= local_host_mask;
193 }
194 } else {
195 silent_flag = false;
196 ignore = 0;
197 }
198 if (rule->command_line->hash.length > 0) {
199 if (++argcnt == MAXRULES) {
200 if (dmake_mode_type == distributed_mode) {
201 /* XXX - tell rxm to execute on local host. */
202 /* I WAS HERE!!! */
203 } else {
204 /* Too many rules, run serially instead. */
205 return build_serial;
206 }
207 }
208 {
209 if (rule->silent && !silent) {
210 silent_flag = true;
211 }
212 if (rule->ignore_error) {
213 ignore++;
214 }
215 /* XXX - need to add support for + prefix */
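/*
 * Commands that need an '@' (silent) or '-' (ignore errors) prefix are
 * copied into freshly allocated memory; the prefix character is also
 * what later identifies these strings as the ones to free after
 * distribute_process() returns.
 */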
216 if (silent_flag || ignore) {
217 *p = getmem((silent_flag ? 1 : 0) +
218 ignore +
219 (strlen(rule->
220 command_line->
221 string_mb)) +
222 1);
223 cp = *p++;
224 if (silent_flag) {
225 *cp++ = (int) at_char;
226 }
227 if (ignore) {
228 *cp++ = (int) hyphen_char;
229 }
230 (void) strcpy(cp, rule->command_line->string_mb);
231 } else {
232 *p++ = rule->command_line->string_mb;
233 }
234 }
235 }
236 }
237 if ((argcnt == 0) ||
238 (report_dependencies_level > 0)) {
239 return build_ok;
240 }
241 {
242 *p = NULL;
243
244 Doname res = distribute_process(commands, line);
245 if (res == build_running) {
246 parallel_process_cnt++;
247 }
248
249 /*
250 * Return (free) only the memory that was specially allocated
251 * for parts of the commands above.
252 */
253 for (int i = 0; commands[i] != NULL; i++) {
254 if ((commands[i][0] == (int) at_char) ||
255 (commands[i][0] == (int) hyphen_char)) {
256 retmem_mb(commands[i]);
257 }
258 }
259 return res;
260 }
261 }
262
263
264 #ifdef TEAMWARE_MAKE_CMN
265 #define MAXJOBS_ADJUST_RFE4694000
266
267 #ifdef MAXJOBS_ADJUST_RFE4694000
268
269 #include <unistd.h> /* sysconf(_SC_NPROCESSORS_ONLN) */
270 #include <sys/ipc.h> /* ftok() */
271 #include <sys/shm.h> /* shmget(), shmat(), shmdt(), shmctl() */
272 #include <semaphore.h> /* sem_init(), sem_trywait(), sem_post(), sem_destroy() */
273 #include <sys/loadavg.h> /* getloadavg() */
274
275 /*
276 * adjust_pmake_max_jobs (int pmake_max_jobs)
277 *
278 * Parameters:
279 * pmake_max_jobs - max jobs limit set by user
280 *
281 * External functions used:
282 * sysconf()
612 }
613 }
614 if (res < 0) {
615 /* job adjustment error */
616 job_adjust_error();
617
618 /* no adjustment */
619 while (parallel_process_cnt >= pmake_max_jobs) {
620 await_parallel(false);
621 finish_children(true);
622 }
623 }
624 break;
625 default:
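/*
 * Default: no adjustment; wait for running children to finish until
 * a job slot frees up under pmake_max_jobs.
 */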
626 while (parallel_process_cnt >= pmake_max_jobs) {
627 await_parallel(false);
628 finish_children(true);
629 }
630 }
631 #endif /* TEAMWARE_MAKE_CMN && MAXJOBS_ADJUST_RFE4694000 */
632 setvar_envvar();
633 /*
634 * Tell the user what DMake is doing.
635 */
636 if (!silent && output_mode != txt2_mode) {
637 /*
638 * Print local_host --> x job(s).
639 */
640 (void) fprintf(stdout,
641 catgets(catd, 1, 325, "%s --> %d %s\n"),
642 local_host,
643 parallel_process_cnt + 1,
644 (parallel_process_cnt == 0) ? catgets(catd, 1, 124, "job") : catgets(catd, 1, 125, "jobs"));
645
646 /* Print command line(s). */
647 tmp_index = 0;
648 while (commands[tmp_index] != NULL) {
649 /* No @ char. */
650 /* XXX - need to add [2] when + prefix is added */
651 if ((commands[tmp_index][0] != (int) at_char) &&
652 (commands[tmp_index][1] != (int) at_char)) {
753 }
754 target->checking_subtree = false;
755 running_list = save_running_list;
756 running_tail = save_running_tail;
757 }
758
759 /*
760 * finish_running()
761 *
762 * Keeps processing until the running_list is emptied out.
763 *
764 * Parameters:
765 *
766 * Global variables used:
767 * running_list The list of running processes
768 */
769 void
770 finish_running(void)
771 {
772 while (running_list != NULL) {
773 {
774 await_parallel(false);
775 finish_children(true);
776 }
777 if (running_list != NULL) {
778 process_next();
779 }
780 }
781 }
782
783 /*
784 * process_next()
785 *
786 * Searches the running list for any targets which can start processing.
787 * This can be a pending target, a serial target, or a subtree target.
788 *
789 * Parameters:
790 *
791 * Static variables used:
792 * running_tail The end of the list of running procs
916 }
917 /*
918 * If nothing has been found to build and there exists a subtree
919 * target with no dependency conflicts, build it.
920 */
921 if (quiescent) {
922 start_loop_3:
923 for (rp_prev = &running_list, rp = running_list;
924 rp != NULL;
925 rp = rp->next) {
926 if (rp->state == build_subtree) {
927 if (!dependency_conflict(rp->target)) {
928 *rp_prev = rp->next;
929 if (rp->next == NULL) {
930 running_tail = rp_prev;
931 }
932 recursion_level = rp->recursion_level;
933 doname_subtree(rp->target,
934 rp->do_get,
935 rp->implicit);
936 quiescent = false;
937 delete_running_struct(rp);
938 goto start_loop_3;
939 } else {
940 subtree_target = rp_prev;
941 rp_prev = &rp->next;
942 }
943 } else {
944 rp_prev = &rp->next;
945 }
946 }
947 }
948 /*
949 * If there is still nothing to build, we either have a deadlock,
950 * or a subtree target has a dependency conflict with something
951 * that is waiting to build.
952 */
953 if (quiescent) {
954 if (subtree_target == NULL) {
955 fatal(catgets(catd, 1, 126, "Internal error: deadlock detected in process_next"));
956 } else {
957 rp = *subtree_target;
958 if (debug_level > 0) {
959 warning(catgets(catd, 1, 127, "Conditional macro conflict encountered for %s between %s and %s"),
960 subtree_conflict2->string_mb,
961 rp->target->string_mb,
962 subtree_conflict->string_mb);
963 }
964 *subtree_target = (*subtree_target)->next;
965 if (rp->next == NULL) {
966 running_tail = subtree_target;
967 }
968 recursion_level = rp->recursion_level;
969 doname_subtree(rp->target, rp->do_get, rp->implicit);
970 delete_running_struct(rp);
971 }
972 }
973 }
974
975 /*
976 * set_conditionals(cnt, targets)
977 *
978 * Sets the conditional macros for the targets given in the array of
979 * targets. The old macro values are returned in an array of
980 * Properties for later resetting.
981 *
982 * Return value:
983 * Array of conditional macro settings
984 *
985 * Parameters:
986 * cnt Number of targets
987 * targets Array of targets
988 */
989 static Property *
1221 * finished building.
1222 * In parallel_mode, output the accumulated stdout/stderr.
1223 * Read the auto dependency stuff, handle a failed build,
1224 * update the target, then finish the doname process for
1225 * that target.
1226 */
1227 if (rp->state == build_ok || rp->state == build_failed) {
1228 *rp_prev = rp->next;
1229 if (rp->next == NULL) {
1230 running_tail = rp_prev;
1231 }
1232 if ((line2 = rp->command) == NULL) {
1233 line2 = get_prop(rp->target->prop, line_prop);
1234 }
1235 if (dmake_mode_type == distributed_mode) {
1236 if (rp->make_refd) {
1237 maybe_reread_make_state();
1238 }
1239 } else {
1240 /*
1241 * Check if there was any job output
1242 * from the parallel build.
1243 */
1244 if (rp->stdout_file != NULL) {
1245 if (stat(rp->stdout_file, &out_buf) < 0) {
1246 fatal(catgets(catd, 1, 130, "stat of %s failed: %s"),
1247 rp->stdout_file,
1248 errmsg(errno));
1249 }
1250 if ((line2 != NULL) &&
1251 (out_buf.st_size > 0)) {
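/*
 * Add up the lengths of the command lines (plus a newline each); if the
 * captured stdout differs from just the echoed commands, or the commands
 * were not echoed at all, the job apparently produced real output that
 * should be shown.
 */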
1252 cmds_length = 0;
1253 for (rule = line2->body.line.command_used,
1254 silent_flag = silent;
1255 rule != NULL;
1256 rule = rule->next) {
1257 cmds_length += rule->command_line->hash.length + 1;
1258 silent_flag = BOOLEAN(silent_flag || rule->silent);
1259 }
1260 if (out_buf.st_size != cmds_length || silent_flag ||
1528 Running rp;
1529 Name *p;
1530
1531 rp = new_running_struct();
1532 rp->state = build_running;
1533 rp->target = target;
1534 rp->true_target = true_target;
1535 rp->command = command;
1536 rp->recursion_level = recursion_level;
1537 rp->do_get = do_get;
1538 rp->implicit = implicit;
1539 rp->auto_count = auto_count;
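/* Take a private copy of the automatic dependency names for this job. */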
1540 if (auto_count > 0) {
1541 rp->automatics = (Name *) getmem(auto_count * sizeof (Name));
1542 for (p = rp->automatics; auto_count > 0; auto_count--) {
1543 *p++ = *automatics++;
1544 }
1545 } else {
1546 rp->automatics = NULL;
1547 }
1548 {
1549 rp->pid = process_running;
1550 process_running = -1;
1551 childPid = -1;
1552 }
1553 rp->job_msg_id = job_msg_id;
1554 rp->stdout_file = stdout_file;
1555 rp->stderr_file = stderr_file;
1556 rp->temp_file = temp_file_name;
1557 rp->redo = false;
1558 rp->next = NULL;
1559 store_conditionals(rp);
1560 stdout_file = NULL;
1561 stderr_file = NULL;
1562 temp_file_name = NULL;
1563 current_target = NULL;
1564 current_line = NULL;
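/*
 * Append rp at the tail of running_list; running_tail always points
 * at the terminating next pointer.
 */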
1565 *running_tail = rp;
1566 running_tail = &rp->next;
1567 }
1886 (void) exists(make_state);
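/* Nothing to re-read if .make.state has not changed since it was last recorded. */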
1887 if (make_state_before == make_state->stat.time) {
1888 return;
1889 }
1890 makefile_type = reading_statefile;
1891 if (read_trace_level > 1) {
1892 trace_reader = true;
1893 }
1894 temp_file_number++;
1895 (void) read_simple_file(make_state,
1896 false,
1897 false,
1898 false,
1899 false,
1900 false,
1901 true);
1902 trace_reader = false;
1903 }
1904 }
1905
1906
1907 static void
1908 delete_running_struct(Running rp)
1909 {
1910 if ((rp->conditional_cnt > 0) &&
1911 (rp->conditional_targets != NULL)) {
1912 retmem_mb((char *) rp->conditional_targets);
1913 }
1914 /**/
1915 if ((rp->auto_count > 0) &&
1916 (rp->automatics != NULL)) {
1917 retmem_mb((char *) rp->automatics);
1918 }
1919 /**/
1920 if(rp->sprodep_value) {
1921 free_name(rp->sprodep_value);
1922 }
1923 if(rp->sprodep_env) {
1924 retmem_mb(rp->sprodep_env);
1925 }