summaryrefslogtreecommitdiff
path: root/arch/x86/include/asm/pda.h
blob: e3d3a081d7985bc8673ad220284769a69993012f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
#ifndef _ASM_X86_PDA_H
#define _ASM_X86_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/percpu.h>

/* Per processor datastructure. %gs points to it while the kernel runs */
/*
 * Per processor datastructure. %gs points to it while the kernel runs.
 * The numeric comments are byte offsets of each field; several are
 * referenced by assembly and by the compiler ABI, so the layout must
 * not be reordered.
 */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long dummy;		/* 8  pad — keeps kernelstack at 16 */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!!  gcc's
					   -fstack-protector emits loads of
					   %gs:40 directly, so the fields
					   above may not grow or move. */
#endif
	char *irqstackptr;		/* bottom of the per-CPU IRQ stack */
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	unsigned int __softirq_pending;	/* pending softirq bitmask */
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	short mmu_state;		/* per-CPU TLB state (see mmu_context) */
	short isidle;			/* nonzero while CPU is in the idle loop */
	struct mm_struct *active_mm;	/* mm currently loaded on this CPU */
	/* per-CPU interrupt statistics (read via read_pda()) */
	unsigned apic_timer_irqs;
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;

/* One pda instance per CPU, held in the ordinary per-cpu area. */
DECLARE_PER_CPU(struct x8664_pda, __pda);
/* Early boot initialization of the given CPU's pda. */
extern void pda_init(int);

/* Address of an arbitrary CPU's pda (for cross-CPU access). */
#define cpu_pda(cpu)		(&per_cpu(__pda, cpu))

/*
 * Accessors for fields of the *current* CPU's pda.  These expand to
 * single %gs-relative x86_*_percpu() operations; callers must have
 * preemption disabled, or the CPU (and hence the pda) may change
 * between operations.
 */
#define read_pda(field)		x86_read_percpu(__pda.field)
#define write_pda(field, val)	x86_write_percpu(__pda.field, val)
#define add_pda(field, val)	x86_add_percpu(__pda.field, val)
#define sub_pda(field, val)	x86_sub_percpu(__pda.field, val)
#define or_pda(field, val)	x86_or_percpu(__pda.field, val)

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field)				\
	x86_test_and_clear_bit_percpu(bit, __pda.field)

#endif	/* !__ASSEMBLY__ */

/*
 * Offset below the top of the task stack at which pda.kernelstack
 * points.  Defined outside the __ASSEMBLY__ guard so entry assembly
 * can use it.  5*8 = 40 bytes — presumably room for the five-word
 * hardware interrupt frame (ss, rsp, rflags, cs, rip); TODO(review):
 * confirm against entry_64.S and the kernelstack writers.
 */
#define PDA_STACKOFFSET (5*8)

#endif /* _ASM_X86_PDA_H */