forked from torvalds/linux

Commit 9ca2c16

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Thomas Gleixner:
 "Perf tool updates and kprobe fixes:

   - perf_mmap overwrite mode fixes/overhaul, prep work to get 'perf top'
     using it, making it bearable to use it in large core count systems
     such as Knights Landing/Mill Intel systems (Kan Liang)

   - s/390 now uses syscall.tbl, just like x86-64 to generate the syscall
     table id -> string tables used by 'perf trace' (Hendrik Brueckner)

   - Use strtoull() instead of home grown function (Andy Shevchenko)

   - Synchronize kernel ABI headers, v4.16-rc1 (Ingo Molnar)

   - Document missing 'perf data --force' option (Sangwon Hong)

   - Add perf vendor JSON metrics for ARM Cortex-A53 Processor (William
     Cohen)

   - Improve error handling and error propagation of ftrace based kprobes
     so failures when installing kprobes are not silently ignored and
     create disfunctional tracepoints"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  kprobes: Propagate error from disarm_kprobe_ftrace()
  kprobes: Propagate error from arm_kprobe_ftrace()
  Revert "tools include s390: Grab a copy of arch/s390/include/uapi/asm/unistd.h"
  perf s390: Rework system call table creation by using syscall.tbl
  perf s390: Grab a copy of arch/s390/kernel/syscall/syscall.tbl
  tools/headers: Synchronize kernel ABI headers, v4.16-rc1
  perf test: Fix test trace+probe_libc_inet_pton.sh for s390x
  perf data: Document missing --force option
  perf tools: Substitute yet another strtoull()
  perf top: Check the latency of perf_top__mmap_read()
  perf top: Switch default mode to overwrite mode
  perf top: Remove lost events checking
  perf hists browser: Add parameter to disable lost event warning
  perf top: Add overwrite fall back
  perf evsel: Expose the perf_missing_features struct
  perf top: Check per-event overwrite term
  perf mmap: Discard legacy interface for mmap read
  perf test: Update mmap read functions for backward-ring-buffer test
  perf mmap: Introduce perf_mmap__read_event()
  perf mmap: Introduce perf_mmap__read_done()
  ...
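Editor's note on the kprobes part of this pull: register_kprobe(), enable_kprobe() and disable_kprobe() now return arming/disarming errors to the caller instead of only emitting a WARN. Below is a minimal caller sketch of what that means in practice, assuming a hypothetical demo module; the target symbol and handler names are illustrative only and are not part of this series.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Hypothetical pre-handler; name and probed symbol are illustrative. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %s\n", p->symbol_name);
	return 0;
}

static struct kprobe kp_demo = {
	.symbol_name	= "do_sys_open",	/* example target, adjust as needed */
	.pre_handler	= handler_pre,
};

static int __init kp_demo_init(void)
{
	int ret = register_kprobe(&kp_demo);

	/*
	 * With this series, a failure to arm the probe (e.g. a
	 * kprobes-on-ftrace arming error) is propagated here rather than
	 * being reported only as a WARN in the kernel log.
	 */
	if (ret < 0)
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit kp_demo_exit(void)
{
	unregister_kprobe(&kp_demo);
}

module_init(kp_demo_init);
module_exit(kp_demo_exit);
MODULE_LICENSE("GPL");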
2 parents 2d6c4e4 + 297f923, commit 9ca2c16

File tree

34 files changed, +1195 −628 lines changed

kernel/kprobes.c

Lines changed: 128 additions & 50 deletions
@@ -978,67 +978,90 @@ static int prepare_kprobe(struct kprobe *p)
 }
 
 /* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
 {
-	int ret;
+	int ret = 0;
 
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 				   (unsigned long)p->addr, 0, 0);
-	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
-	kprobe_ftrace_enabled++;
-	if (kprobe_ftrace_enabled == 1) {
+	if (ret) {
+		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+		return ret;
+	}
+
+	if (kprobe_ftrace_enabled == 0) {
 		ret = register_ftrace_function(&kprobe_ftrace_ops);
-		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+		if (ret) {
+			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
+			goto err_ftrace;
+		}
 	}
+
+	kprobe_ftrace_enabled++;
+	return ret;
+
+err_ftrace:
+	/*
+	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
+	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
+	 * empty filter_hash which would undesirably trace all functions.
+	 */
+	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+	return ret;
 }
 
 /* Caller must lock kprobe_mutex */
-static void disarm_kprobe_ftrace(struct kprobe *p)
+static int disarm_kprobe_ftrace(struct kprobe *p)
 {
-	int ret;
+	int ret = 0;
 
-	kprobe_ftrace_enabled--;
-	if (kprobe_ftrace_enabled == 0) {
+	if (kprobe_ftrace_enabled == 1) {
 		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
-		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
+			return ret;
 	}
+
+	kprobe_ftrace_enabled--;
+
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 			   (unsigned long)p->addr, 1, 0);
 	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	return ret;
 }
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p)	do {} while (0)
-#define disarm_kprobe_ftrace(p)	do {} while (0)
+#define arm_kprobe_ftrace(p)	(-ENODEV)
+#define disarm_kprobe_ftrace(p)	(-ENODEV)
 #endif
 
 /* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
 {
-	if (unlikely(kprobe_ftrace(kp))) {
-		arm_kprobe_ftrace(kp);
-		return;
-	}
+	if (unlikely(kprobe_ftrace(kp)))
+		return arm_kprobe_ftrace(kp);
+
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
+
+	return 0;
 }
 
 /* Disarm a kprobe with text_mutex */
-static void disarm_kprobe(struct kprobe *kp, bool reopt)
+static int disarm_kprobe(struct kprobe *kp, bool reopt)
 {
-	if (unlikely(kprobe_ftrace(kp))) {
-		disarm_kprobe_ftrace(kp);
-		return;
-	}
+	if (unlikely(kprobe_ftrace(kp)))
+		return disarm_kprobe_ftrace(kp);
 
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
+
+	return 0;
 }
 
 /*
@@ -1362,9 +1385,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
+		if (!kprobes_all_disarmed) {
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			ret = arm_kprobe(ap);
+			if (ret) {
+				ap->flags |= KPROBE_FLAG_DISABLED;
+				list_del_rcu(&p->list);
+				synchronize_sched();
+			}
+		}
 	}
 	return ret;
 }
@@ -1573,8 +1602,14 @@ int register_kprobe(struct kprobe *p)
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arm_kprobe(p);
+	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+		ret = arm_kprobe(p);
+		if (ret) {
+			hlist_del_rcu(&p->hlist);
+			synchronize_sched();
+			goto out;
+		}
+	}
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
@@ -1608,11 +1643,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
 static struct kprobe *__disable_kprobe(struct kprobe *p)
 {
 	struct kprobe *orig_p;
+	int ret;
 
 	/* Get an original kprobe for return */
 	orig_p = __get_valid_kprobe(p);
 	if (unlikely(orig_p == NULL))
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	if (!kprobe_disabled(p)) {
 		/* Disable probe if it is a child probe */
@@ -1626,8 +1662,13 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
			 * should have already been disarmed, so
			 * skip unneed disarming process.
			 */
-			if (!kprobes_all_disarmed)
-				disarm_kprobe(orig_p, true);
+			if (!kprobes_all_disarmed) {
+				ret = disarm_kprobe(orig_p, true);
+				if (ret) {
+					p->flags &= ~KPROBE_FLAG_DISABLED;
+					return ERR_PTR(ret);
+				}
+			}
 			orig_p->flags |= KPROBE_FLAG_DISABLED;
 		}
 	}
@@ -1644,8 +1685,8 @@ static int __unregister_kprobe_top(struct kprobe *p)
 
 	/* Disable kprobe. This will disarm it if needed. */
 	ap = __disable_kprobe(p);
-	if (ap == NULL)
-		return -EINVAL;
+	if (IS_ERR(ap))
+		return PTR_ERR(ap);
 
 	if (ap == p)
 		/*
@@ -2078,12 +2119,14 @@ static void kill_kprobe(struct kprobe *p)
 int disable_kprobe(struct kprobe *kp)
 {
 	int ret = 0;
+	struct kprobe *p;
 
 	mutex_lock(&kprobe_mutex);
 
 	/* Disable this kprobe */
-	if (__disable_kprobe(kp) == NULL)
-		ret = -EINVAL;
+	p = __disable_kprobe(kp);
+	if (IS_ERR(p))
+		ret = PTR_ERR(p);
 
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -2116,7 +2159,9 @@ int enable_kprobe(struct kprobe *kp)
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
-		arm_kprobe(p);
+		ret = arm_kprobe(p);
+		if (ret)
+			p->flags |= KPROBE_FLAG_DISABLED;
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
@@ -2407,11 +2452,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.release        = seq_release,
 };
 
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
-	unsigned int i;
+	unsigned int i, total = 0, errors = 0;
+	int err, ret = 0;
 
 	mutex_lock(&kprobe_mutex);
 
@@ -2428,46 +2474,74 @@ static void arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, head, hlist)
-			if (!kprobe_disabled(p))
-				arm_kprobe(p);
+		/* Arm all kprobes on a best-effort basis */
+		hlist_for_each_entry_rcu(p, head, hlist) {
+			if (!kprobe_disabled(p)) {
+				err = arm_kprobe(p);
+				if (err) {
+					errors++;
+					ret = err;
+				}
+				total++;
+			}
+		}
 	}
 
-	printk(KERN_INFO "Kprobes globally enabled\n");
+	if (errors)
+		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
+			errors, total);
+	else
+		pr_info("Kprobes globally enabled\n");
 
 already_enabled:
 	mutex_unlock(&kprobe_mutex);
-	return;
+	return ret;
 }
 
-static void disarm_all_kprobes(void)
+static int disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
-	unsigned int i;
+	unsigned int i, total = 0, errors = 0;
+	int err, ret = 0;
 
 	mutex_lock(&kprobe_mutex);
 
 	/* If kprobes are already disarmed, just return */
 	if (kprobes_all_disarmed) {
 		mutex_unlock(&kprobe_mutex);
-		return;
+		return 0;
 	}
 
 	kprobes_all_disarmed = true;
-	printk(KERN_INFO "Kprobes globally disabled\n");
 
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
+		/* Disarm all kprobes on a best-effort basis */
 		hlist_for_each_entry_rcu(p, head, hlist) {
-			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				disarm_kprobe(p, false);
+			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
+				err = disarm_kprobe(p, false);
+				if (err) {
+					errors++;
+					ret = err;
+				}
+				total++;
+			}
 		}
 	}
+
+	if (errors)
+		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
+			errors, total);
+	else
+		pr_info("Kprobes globally disabled\n");
+
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for disarming all kprobes by optimizer */
 	wait_for_kprobe_optimizer();
+
+	return ret;
 }
 
 /*
@@ -2494,6 +2568,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 {
 	char buf[32];
 	size_t buf_size;
+	int ret = 0;
 
 	buf_size = min(count, (sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size))
@@ -2504,17 +2579,20 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case 'y':
 	case 'Y':
 	case '1':
-		arm_all_kprobes();
+		ret = arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
 	case '0':
-		disarm_all_kprobes();
+		ret = disarm_all_kprobes();
 		break;
 	default:
 		return -EINVAL;
 	}
 
+	if (ret)
+		return ret;
+
 	return count;
 }
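Editor's note on the disable_kprobe()/enable_kprobe() hunks above: since arm/disarm failures are now propagated, a caller can react to them. A hedged sketch with illustrative wrapper names (not from the commit), operating on an already-registered probe:

#include <linux/kprobes.h>

/* Illustrative wrappers around an already-registered kprobe 'kp'. */
static int kp_pause(struct kprobe *kp)
{
	int ret = disable_kprobe(kp);	/* disarm errors are now returned */

	if (ret)
		pr_warn("disable_kprobe failed: %d\n", ret);
	return ret;
}

static int kp_resume(struct kprobe *kp)
{
	int ret = enable_kprobe(kp);	/* a failed arm leaves the probe flagged disabled */

	if (ret)
		pr_warn("enable_kprobe failed: %d\n", ret);
	return ret;
}

Likewise, per the write_enabled_file_bool() hunk, writing '1' or '0' to /sys/kernel/debug/kprobes/enabled now returns the arm/disarm error to userspace instead of always reporting success.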

tools/arch/powerpc/include/uapi/asm/kvm.h

Lines changed: 2 additions & 0 deletions
@@ -632,6 +632,8 @@ struct kvm_ppc_cpu_char {
 #define  KVM_REG_PPC_TIDR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
 #define  KVM_REG_PPC_PSSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
+#define KVM_REG_PPC_DEC_EXPIRY	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
