struct timerlat_u_params user;
};
+#define for_each_monitored_cpu(cpu, nr_cpus, common) \
+ for (cpu = 0; cpu < nr_cpus; cpu++) \
+ if (!(common)->cpus || CPU_ISSET(cpu, &(common)->monitored_cpus))
+
struct tool_ops;
/*
if (!params->common.hist.no_index)
trace_seq_printf(s, "Index");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "count:");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "min: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "avg: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "max: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].count)
continue;
trace_seq_printf(trace->seq, "%-6d",
bucket * data->bucket_size);
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "over: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].count)
continue;
osnoise_top_header(top);
- for (i = 0; i < nr_cpus; i++) {
- if (params->common.cpus && !CPU_ISSET(i, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(i, nr_cpus, &params->common) {
osnoise_top_print(top, i);
}
nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
- for (i = 0; i < nr_cpus; i++) {
- if (params->common.cpus && !CPU_ISSET(i, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(i, nr_cpus, &params->common) {
if (save_cpu_idle_disable_state(i) < 0) {
err_msg("Could not save cpu idle state.\n");
return -1;
if (dma_latency_fd >= 0)
close(dma_latency_fd);
if (params->deepest_idle_state >= -1) {
- for (i = 0; i < nr_cpus; i++) {
- if (params->common.cpus &&
- !CPU_ISSET(i, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(i, nr_cpus, &params->common) {
restore_cpu_idle_disable_state(i);
}
}
if (!params->common.hist.no_index)
trace_seq_printf(s, "Index");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "count:");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "min: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "avg: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "max: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
sum.min_thread = ~0;
sum.min_user = ~0;
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
trace_seq_printf(trace->seq, "%-6d",
bucket * data->bucket_size);
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
if (!params->common.hist.no_index)
trace_seq_printf(trace->seq, "over: ");
- for (cpu = 0; cpu < data->nr_cpus; cpu++) {
- if (params->common.cpus && !CPU_ISSET(cpu, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(cpu, data->nr_cpus, &params->common) {
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
timerlat_top_header(params, top);
- for (i = 0; i < nr_cpus; i++) {
- if (params->common.cpus && !CPU_ISSET(i, &params->common.monitored_cpus))
- continue;
+ for_each_monitored_cpu(i, nr_cpus, &params->common) {
timerlat_top_print(top, i);
timerlat_top_update_sum(top, i, &summary);
}