                vfree(cpu_buffer[i].buffer);
        }
}

int alloc_cpu_buffers(void)
{
        int i;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                b->buffer = vmalloc(sizeof(struct op_sample) * fs_cpu_buffer_size);
                if (!b->buffer)
                        goto fail;
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}

void start_cpu_work(void)
{
        int i;

        /* Spread the flushes by one jiffy per cpu so they
         * don't all fire at once. */
        for_each_online_cpu(i)
                schedule_delayed_work_on(i, &cpu_buffer[i].work,
                                         DEFAULT_TIMER_EXPIRE + i);
}

void end_cpu_work(void)
{
        int i;

        for_each_online_cpu(i)
                cancel_delayed_work(&cpu_buffer[i].work);

        flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
        /* Invalidate the cached task and kernel/user state so the
         * next sample logged repopulates them. */
        cpu_buf->last_is_kernel = -1;
        cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        if (tail > head)
                return (tail - head) - 1;

        return tail + (b->buffer_size - head) - 1;
}

static void increment_head(struct oprofile_cpu_buffer * b)
{
        unsigned long new_head = b->head_pos + 1;

        /* Ensure anything written to the slot before we increment
         * is visible. */
        wmb();

        if (new_head < b->buffer_size)
                b->head_pos = new_head;
        else
                b->head_pos = 0;
}

static inline void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
           unsigned long pc, unsigned long event)
{
        struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];

        entry->eip = pc;
        entry->event = event;
        increment_head(cpu_buf);
}

static inline void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
        add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
                      int is_kernel, unsigned long event)
{
        if (nr_available_slots(cpu_buf) < 3) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }
        add_sample(cpu_buf, pc, event);
        return 1;
}
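
/*
 * The safety claim above is the classic single-producer/single-consumer
 * ring: the interrupt-context writer only ever advances head, the reader
 * only ever advances tail, and each slot is published before head moves.
 * A minimal userspace sketch using C11 atomics in place of the kernel's
 * wmb(), guarded with #if 0 since it is not part of this file
 * (spsc_push and spsc_pop are illustrative names):
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 8

struct spsc_ring {
        unsigned long slot[RING_SIZE];
        _Atomic unsigned long head;     /* advanced only by the producer */
        _Atomic unsigned long tail;     /* advanced only by the consumer */
};

static bool spsc_push(struct spsc_ring * r, unsigned long v)
{
        unsigned long head = atomic_load_explicit(&r->head, memory_order_relaxed);
        unsigned long next = (head + 1) % RING_SIZE;

        if (next == atomic_load_explicit(&r->tail, memory_order_acquire))
                return false;           /* full: one slot always stays empty */
        r->slot[head] = v;
        /* Publish the slot before moving head, as wmb() does in increment_head(). */
        atomic_store_explicit(&r->head, next, memory_order_release);
        return true;
}

static bool spsc_pop(struct spsc_ring * r, unsigned long * v)
{
        unsigned long tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

        if (tail == atomic_load_explicit(&r->head, memory_order_acquire))
                return false;           /* empty */
        *v = r->slot[tail];
        atomic_store_explicit(&r->tail, (tail + 1) % RING_SIZE, memory_order_release);
        return true;
}
#endif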

static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
        cpu_buf->tracing = 0;
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
        unsigned long pc = profile_pc(regs);
        int is_kernel = !user_mode(regs);

        if (!oprofile_begin_trace(cpu_buf))
                return;
        /* If the sample itself was lost we have no source to backtrace from. */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, backtrace_depth);
        oprofile_end_trace(cpu_buf);
}

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
        log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

        /* Only log while inside an oprofile_begin/end_trace pair. */
        if (!cpu_buf->tracing)
                return;

        add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses