Simple Performance Counters

This patch allows the use of simple performance counters to measure time
intervals in the kernel source code. This allows a detailed analysis of the
time spent and the amount of data processed in specific code sections of the
kernel.

Time is measured using the cycle counter (TSC on IA32, ITC on IA64) which has
a very low latency.

To use add #include <linux/perf.h> to the header of the file where the
measurement needs to take place.

Then add the following to the code:

To declare a time stamp do

	PC_T xx;

To mark the beginning of the time measurement do

	PC_START(xx)

(If measurement from the beginning of a function is desired one may use
INITIALIZED_PC_T(xx) instead).

To mark the end of the time frame do:

	PC_STOP(xx, "name-for-proc");

or if the amount of data transferred needs to be measured as well:

	PC_THROUGHPUT(xx, "name-for-proc", number-of-bytes);


The patch puts three performance counters into critical kernel paths to show
how it's done.

The measurements will show up in /proc/perf/all. Processor specific statistics
may be obtained via /proc/perf/<nr-of-processor>. Writing to /proc/perf/reset
will reset all counters. F.e.

echo >/proc/perf/reset

Sample output:

AllocPages               786882 (+  0) 9.9s(223ns/12.6us/48.6us) 12.9gb(16.4kb/16.4kb/32.8kb)
FaultTime                786855 (+192) 10.4s(198ns/13.2us/323.6us)
PrepZeroPage             786765 (+  0) 9.2s(549ns/11.7us/48.1us) 12.9gb(16.4kb/16.4kb/16.4kb)

The first counter is the number of times that the time measurement was performed.
(+ xx) is the number of samples that were thrown away since the processor on
which the process is running changed. Cycle counters are not consistent
across different processors.

Then follows the sum of the time spent in the code segment followed in parentheses
by the minimum / average / maximum time spent there. The second block are the sizes
of data processed. In this sample 12.9 GB was allocated via AllocPages. Most
allocations were 16k = 1 page although there were also 32K (2 page) allocations.

This patch is IA64 specific for now. To support other arches one needs
to define cycles_to_ns in kernel/perf.c.

Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.21-rc5-mm4/kernel/Makefile
===================================================================
--- linux-2.6.21-rc5-mm4.orig/kernel/Makefile	2007-04-03 23:48:34.000000000 -0700
+++ linux-2.6.21-rc5-mm4/kernel/Makefile	2007-04-04 22:54:32.000000000 -0700
@@ -55,6 +55,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayac
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_UTRACE) += utrace.o
 obj-$(CONFIG_PTRACE) += ptrace.o
+obj-$(CONFIG_PERFCOUNTER) += perf.o
 
 ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
Index: linux-2.6.21-rc5-mm4/include/linux/perf.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.21-rc5-mm4/include/linux/perf.h	2007-04-04 23:16:49.000000000 -0700
@@ -0,0 +1,36 @@
+#ifndef _LINUX_PERF_H
+#define _LINUX_PERF_H
+
+/*
+ * Performance Counters and Measurement macros
+ * (C) 2005 Silicon Graphics Incorporated
+ * by Christoph Lameter <clameter@sgi.com>, April 2005
+ *
+ * Counters are calculated using the cycle counter. If a process
+ * is migrated to another cpu during the measurement then the measurement
+ * is invalid.
+ *
+ * We cannot disable preemption during measurement since that may interfere
+ * with other things in the kernel and limit the usefulness of the counters.
+ */
+
+/* Event ids that can be measured; NR_PERFCOUNT sizes the counter arrays. */
+enum perfcount {
+	PC_SAMPLE,
+	NR_PERFCOUNT
+};
+
+/* One in-flight measurement, normally kept on the caller's stack. */
+struct pc {
+	unsigned long cycles;	/* cycle counter value at pc_start() */
+	int processor;		/* cpu on which the measurement started */
+	enum perfcount counter;	/* event id this sample belongs to */
+};
+
+void pc_start(struct pc *pc, enum perfcount counter);
+void pc_throughput(struct pc *pc, unsigned long bytes);
+
+/* A pure time measurement is a throughput measurement of zero bytes. */
+#define pc_stop(__pc) pc_throughput(__pc, 0)
+
+#endif /* _LINUX_PERF_H */
Index: linux-2.6.21-rc5-mm4/kernel/perf.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.21-rc5-mm4/kernel/perf.c	2007-04-04 23:19:09.000000000 -0700
@@ -0,0 +1,332 @@
+/*
+ * Simple Performance Counter subsystem
+ *
+ * (C) 2007 sgi.
+ *
+ * April 2007, Christoph Lameter <clameter@sgi.com>
+ */
+
+/*
+ * Note: <linux/config.h> must not be included; the kernel configuration
+ * is provided automatically by the build system.
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/cpumask.h>
+#include <linux/perf.h>
+/* NOTE(review): nothing below appears to hash anything — confirm whether
+   the dcache.h include is still needed. */
+#include <linux/dcache.h>
+
+#ifdef CONFIG_IA64
+#define cycles_to_ns(x) (((x) * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT)
+#else
+#error "cycles_to_ns not defined for this architecture"
+#endif
+
+/*
+ * Number of counters visible through /proc/perf. This must match the size
+ * of the per-cpu counter arrays, otherwise the /proc iterators run past
+ * the end of the arrays.
+ */
+#define MAXCOUNT NR_PERFCOUNT
+
+/* Accumulated statistics for one event id on one cpu. */
+struct cpu_perf_counter {
+	u32 events;		/* number of completed measurements */
+	u32 mintime;		/* shortest single measurement, in ns */
+	u32 maxtime;		/* longest single measurement, in ns */
+	u32 minbytes;		/* smallest single throughput sample */
+	u32 maxbytes;		/* largest single throughput sample */
+	u32 skipped;		/* samples dropped due to cpu migration */
+	u64 time;		/* total time, in ns */
+	u64 bytes;		/* total bytes processed */
+	const char *text;	/* name shown in /proc/perf files */
+};
+
+/* The statistics: one struct cpu_perf_counter per event id per cpu. */
+DEFINE_PER_CPU(struct cpu_perf_counter [NR_PERFCOUNT], counters);
+
+/*
+ * Begin a measurement: remember the event id, the current cpu and the
+ * current cycle counter value in the caller-supplied struct pc.
+ *
+ * raw_smp_processor_id() is used because we deliberately run preemptible:
+ * a migration is detected and discarded later in pc_throughput().
+ */
+void pc_start(struct pc *pc, enum perfcount nr)
+{
+	pc->counter = nr;
+	pc->processor = raw_smp_processor_id();
+	pc->cycles = get_cycles();
+}
+
+/*
+ * Finish a measurement and fold it into this cpu's statistics.
+ * @bytes is the amount of data processed, or 0 for a pure time measurement
+ * (see pc_stop()).
+ */
+void pc_throughput(struct pc *pc, unsigned long bytes)
+{
+	unsigned long time = get_cycles();
+	struct cpu_perf_counter *p;
+
+	/* get_cpu_var disables preemption for the duration of the update */
+	p = &get_cpu_var(counters)[pc->counter];
+
+	/* Check if the processor was changed during our measurement attempt */
+	if (unlikely(pc->processor != smp_processor_id())) {
+		/*
+		 * Cycle counters are not comparable across cpus. Only record
+		 * the failure in the new cpu's statistics.
+		 */
+		p->skipped++;
+		goto out;
+	}
+
+	time = cycles_to_ns(time - pc->cycles);
+	if (unlikely(time > (1UL << (BITS_PER_LONG - 9)))) {
+		printk(KERN_ERR "perfcount: invalid time difference. Measurement ignored.\n");
+		goto out;
+	}
+
+	/* Count the event only once we know the sample is valid */
+	p->events++;
+
+	if (time > 0) {
+		/* Time information */
+		p->time += time;
+		if (unlikely(time > p->maxtime))
+			p->maxtime = time;
+
+		if (unlikely(p->mintime == 0 || time < p->mintime))
+			p->mintime = time;
+	}
+
+	if (unlikely(bytes)) {
+		/* Bytes information */
+		p->bytes += bytes;
+		if (bytes > p->maxbytes)
+			p->maxbytes = bytes;
+		if (p->minbytes == 0 || bytes < p->minbytes)
+			p->minbytes = bytes;
+	}
+out:
+	put_cpu_var(counters);
+}
+
+EXPORT_SYMBOL(pc_start);
+EXPORT_SYMBOL(pc_throughput);
+
+/* Zero all statistics on all possible cpus (also used at initialization). */
+void pc_reset(void)
+{
+	int cpu, i;
+
+	/* cpu ids may be sparse, so iterate the possible map directly */
+	for_each_possible_cpu(cpu)
+		for (i = 0; i < NR_PERFCOUNT; i++) {
+			struct cpu_perf_counter *c = &per_cpu(counters, cpu)[i];
+
+			c->events = 0;
+			c->skipped = 0;
+			c->time = 0;
+			c->maxtime = 0;
+			c->mintime = 0;
+			c->bytes = 0;
+			c->minbytes = 0;
+			c->maxbytes = 0;
+			/* c->text is intentionally preserved */
+		}
+}
+EXPORT_SYMBOL(pc_reset);
+
+/* Scaling tables: each entry says how many of this unit make the next one. */
+struct unit_v_s {
+	unsigned int n;		/* divisor to reach the next bigger unit */
+	const char *s;		/* unit suffix printed behind the value */
+};
+
+/*
+ * The last entry of each table uses ~0U as divisor so that pval() can
+ * never index past the end of the table.
+ */
+static const struct unit_v_s event_units[] = {
+	{ 1000, "" },
+	{ 1000, "K" },
+	{ 1000, "M" },
+	{ 1000, "G" },
+	{ 1000, "T" },
+	{ 1000, "P" },
+	{ ~0U, "XX" },
+};
+
+static const struct unit_v_s time_units[] = {
+	{ 1000, "ns" },
+	{ 1000, "us" },
+	{ 1000, "ms" },
+	{ 60, "s" },
+	{ 60, "m" },
+	{ 24, "h" },
+	{ 365, "d" },
+	{ ~0U, "y" },
+};
+
+static const struct unit_v_s byte_units[] = {
+	{ 1000, "b" },
+	{ 1000, "kb" },
+	{ 1000, "mb" },
+	{ 1000, "gb" },
+	{ 1000, "tb" },
+	{ 1000, "pb" },
+	{ ~0U, "xb" }
+};
+
+/* Print a value using the given array of units and scale it properly */
+static void pval(struct seq_file *s, unsigned long x, const struct unit_v_s *u)
+{
+	unsigned n = 0;
+	unsigned rem = 0;
+	unsigned last_divisor = 0;
+
+	while (x >= u[n].n) {
+		last_divisor = u[n].n;
+		rem = x % last_divisor;
+		x = x / last_divisor;
+		n++;
+	}
+
+	/*
+	 * Calculate a rounded fractional decimal digit. If the value was
+	 * below the first threshold no division took place (last_divisor
+	 * is still 0) and there is no fraction — dividing would oops.
+	 */
+	if (last_divisor)
+		rem = (rem * 10 + last_divisor / 2) / last_divisor;
+
+	/*
+	 * Rounding may have resulted in the need to go
+	 * to the next number
+	 */
+	if (rem == 10) {
+		x++;
+		rem = 0;
+	}
+
+	seq_printf(s, "%lu", x);
+	if (rem) {
+		seq_putc(s, '.');
+		seq_putc(s, '0' + rem);
+	}
+	seq_puts(s, u[n].s);
+}
+
+/* Print a set of statistical values in the form sum(min/avg/max) */
+static void pc_print(struct seq_file *s, const struct unit_v_s *u,
+		unsigned long count, unsigned long sum,
+		unsigned long min, unsigned long max)
+{
+	pval(s, sum, u);
+	seq_putc(s, '(');
+	pval(s, min, u);
+	seq_putc(s, '/');
+	/* Guard against division by zero when no events were recorded */
+	pval(s, count ? (sum + count / 2) / count : 0, u);
+	seq_putc(s, '/');
+	pval(s, max, u);
+	seq_putc(s, ')');
+}
+
+
+/*
+ * Emit one /proc line for counter (unsigned long)v - 1. s->private selects
+ * the cpu; -1 requests a summary over all possible cpus.
+ */
+static int perf_show(struct seq_file *s, void *v)
+{
+	int cpu = (unsigned long)s->private;
+	int counter = (unsigned long)v - 1;
+	int n;
+	struct cpu_perf_counter summary, *x;
+	const char *text;
+
+	/* Defend against an iterator running past the counter array */
+	if (counter < 0 || counter >= NR_PERFCOUNT)
+		return 0;
+
+	if (cpu >= 0)
+		x = &per_cpu(counters, cpu)[counter];
+	else {
+		/* Summarize over all cpus, starting from cpu 0's data */
+		memcpy(&summary, &per_cpu(counters, 0)[counter], sizeof(summary));
+		for_each_possible_cpu(n) {
+			struct cpu_perf_counter *c;
+
+			if (n == 0)
+				continue;
+			c = &per_cpu(counters, n)[counter];
+
+			summary.events += c->events;
+			summary.skipped += c->skipped;
+			summary.time += c->time;
+			summary.bytes += c->bytes;
+
+			if (summary.maxtime < c->maxtime)
+				summary.maxtime = c->maxtime;
+			if (summary.mintime == 0 ||
+				(c->mintime != 0 && summary.mintime > c->mintime))
+					summary.mintime = c->mintime;
+
+			if (summary.maxbytes < c->maxbytes)
+				summary.maxbytes = c->maxbytes;
+			if (summary.minbytes == 0 ||
+				(c->minbytes != 0 && summary.minbytes > c->minbytes))
+					summary.minbytes = c->minbytes;
+
+			if (summary.text == NULL)
+				summary.text = c->text;
+		}
+		x = &summary;
+	}
+
+	if (!x->events)
+		return 0;
+
+	/* NOTE(review): nothing in this file ever assigns ->text, so this
+	   always prints "UNKNOWN" — confirm where names get registered. */
+	text = x->text;
+	if (!text)
+		text = "UNKNOWN";
+
+	seq_printf(s, "%-20s %10u (+%3u) ", text, x->events, x->skipped);
+	pc_print(s, time_units, x->events, x->time, x->mintime, x->maxtime);
+	if (x->bytes) {
+		/* Only print throughput information if its available */
+		seq_putc(s, ' ');
+		pc_print(s, byte_units, x->events, x->bytes, x->minbytes, x->maxbytes);
+	}
+	seq_putc(s, '\n');
+	return 0;
+}
+
+/*
+ * seq_file iterator over counter indices. The returned cookie is the
+ * index + 1 because seq_file treats NULL (i.e. 0) as end-of-sequence.
+ */
+static void *perf_start(struct seq_file *m, loff_t *pos)
+{
+	return (*pos < NR_PERFCOUNT) ? (void *)(unsigned long)(*pos + 1) : NULL;
+}
+
+static void *perf_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return perf_start(m, pos);
+}
+
+static void perf_stop(struct seq_file *m, void *v)
+{
+}
+
+static struct seq_operations perf_data_ops = {
+	.start  = perf_start,
+	.next   = perf_next,
+	.stop   = perf_stop,
+	.show   = perf_show,
+};
+
+/*
+ * Open one of the /proc/perf data files. The proc entry's ->data (the cpu
+ * number, or -1 for the summary) is handed to the seq_file as ->private.
+ */
+static int perf_data_open(struct inode *inode, struct file *file)
+{
+	int res;
+
+	res = seq_open(file, &perf_data_ops);
+	if (!res)
+		((struct seq_file *)file->private_data)->private = PDE(inode)->data;
+
+	return res;
+}
+
+static struct file_operations perf_data_fops = {
+	.open		= perf_data_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * Any write to /proc/perf/reset clears all counters. The written data is
+ * ignored; the full count is claimed so the writer does not retry.
+ */
+static int perf_reset_write(struct file *file, const char __user *buffer,
+	unsigned long count, void *data)
+{
+	pc_reset();
+	return count;
+}
+
+/*
+ * Register /proc/perf/: "all" (summary over all cpus), "reset" (write to
+ * clear) and one file per possible cpu with that cpu's statistics.
+ *
+ * create_proc_entry() can fail; a missing entry is skipped rather than
+ * dereferenced (the original code would oops on a NULL return).
+ */
+static int __init init_pc(void)
+{
+	int cpu;
+	struct proc_dir_entry *proc_perf, *perf_reset, *perf_all;
+
+	proc_perf = proc_mkdir("perf", NULL);
+	if (!proc_perf)
+		return -ENOMEM;
+
+	perf_reset = create_proc_entry("reset", S_IWUGO, proc_perf);
+	if (perf_reset)
+		perf_reset->write_proc = perf_reset_write;
+
+	perf_all = create_proc_entry("all", S_IRUGO, proc_perf);
+	if (perf_all) {
+		perf_all->proc_fops = &perf_data_fops;
+		perf_all->data = (void *)-1;	/* -1 = all-cpu summary */
+	}
+
+	/* cpu ids may be sparse; only register files for possible cpus */
+	for_each_possible_cpu(cpu) {
+		char name[20];
+		struct proc_dir_entry *p;
+
+		sprintf(name, "%d", cpu);
+		p = create_proc_entry(name, S_IRUGO, proc_perf);
+		if (!p)
+			continue;
+
+		p->proc_fops = &perf_data_fops;
+		p->data = (void *)(unsigned long)cpu;
+	}
+
+	pc_reset();
+	return 0;
+}
+
+__initcall(init_pc);
+
