author     Jason Gunthorpe <jgg@mellanox.com>  2018-08-16 23:13:03 +0300
committer  Jason Gunthorpe <jgg@mellanox.com>  2018-08-16 23:21:29 +0300
commit     0a3173a5f09bc58a3638ecfd0a80bdbae55e123c (patch)
tree       d6c0bc84863cca54dfbde3b7463e5d49c82af9f1 /arch/arm/kernel/perf_event_xscale.c
parent     92f4e77c85918eab5e5803d7e28ab89a7e6bd3a2 (diff)
parent     5c60a7389d795e001c8748b458eb76e3a5b6008c (diff)
download   linux-0a3173a5f09bc58a3638ecfd0a80bdbae55e123c.tar.xz
Merge branch 'linus/master' into rdma.git for-next
rdma.git merge resolution for the 4.19 merge window
Conflicts:
drivers/infiniband/core/rdma_core.c
- Use the rdma code and revise with the new spelling for
atomic_fetch_add_unless (a sketch of the new spelling follows after
this message)
drivers/nvme/host/rdma.c
- Replace max_sge with max_send_sge in new blk code
drivers/nvme/target/rdma.c
- Use the blk code and revise to use NULL for ib_post_recv when
appropriate (see the ib_post_recv sketch after this message)
- Replace max_sge with max_recv_sge in new blk code
net/rds/ib_send.c
- Use the net code and revise to use NULL for ib_post_recv when
appropriate
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
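
For reference, the rdma_core.c resolution tracks the 4.19 rename of the
add-unless atomic helper to atomic_fetch_add_unless(); the semantics are
unchanged: add a value unless the counter already holds a given value, and
return the old value. Below is a minimal sketch of the new spelling using a
made-up my_usecnt counter, not the actual rdma_core.c call site.

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical usage counter, only to illustrate the new spelling. */
static atomic_t my_usecnt = ATOMIC_INIT(0);

static bool my_try_get(void)
{
	/*
	 * atomic_fetch_add_unless(v, a, u): add @a to @v unless @v equals @u,
	 * and return the value @v held beforehand.  Here: take a reference
	 * unless the object is already marked dead with a count of -1.
	 */
	return atomic_fetch_add_unless(&my_usecnt, 1, -1) != -1;
}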
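
The nvme and rds resolutions track two other verbs API changes from this
window: the max_sge device attribute was split into separate max_send_sge and
max_recv_sge fields, and ib_post_recv() now accepts NULL for its bad-WR
argument when the caller does not care which work request failed. The
following is a hedged sketch of posting one receive buffer under the new
convention; the my_* names and the pre-mapped buffer are assumptions, not
code from the conflicted files.

#include <rdma/ib_verbs.h>

/* Post a single receive buffer that is already DMA-mapped at @addr. */
static int my_post_one_recv(struct ib_qp *qp, u64 addr, u32 length, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = addr,
		.length = length,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = addr,	/* cookie reported back in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};

	/* The final bad_wr argument is now optional; NULL is accepted. */
	return ib_post_recv(qp, &wr, NULL);
}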
Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 88d1a76f5367..f6cdcacfb96d 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
@@ -316,7 +322,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32 xscale1pmu_read_counter(struct perf_event *event)
+static inline u64 xscale1pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -337,7 +343,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
 	return val;
 }
 
-static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -370,11 +376,11 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = xscale1pmu_read_counter;
 	cpu_pmu->write_counter = xscale1pmu_write_counter;
 	cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
 	cpu_pmu->start = xscale1pmu_start;
 	cpu_pmu->stop = xscale1pmu_stop;
 	cpu_pmu->map_event = xscale_map_event;
 	cpu_pmu->num_events = 3;
-	cpu_pmu->max_period = (1LLU << 32) - 1;
 
 	return 0;
 }
@@ -679,7 +685,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32 xscale2pmu_read_counter(struct perf_event *event)
+static inline u64 xscale2pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -706,7 +712,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
 	return val;
 }
 
-static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -739,11 +745,11 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter = xscale2pmu_read_counter;
 	cpu_pmu->write_counter = xscale2pmu_write_counter;
 	cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
 	cpu_pmu->start = xscale2pmu_start;
 	cpu_pmu->stop = xscale2pmu_stop;
 	cpu_pmu->map_event = xscale_map_event;
 	cpu_pmu->num_events = 5;
-	cpu_pmu->max_period = (1LLU << 32) - 1;
 
 	return 0;
 }