forked from xnl-h4ck3r/xnLinkFinder
-
Notifications
You must be signed in to change notification settings - Fork 0
/
xnLinkFinder.py
3920 lines (3529 loc) · 173 KB
/
xnLinkFinder.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python
# Python 3
# Good luck and good hunting! If you really love the tool (or any others), or they helped you find an awesome bounty, consider BUYING ME A COFFEE! (https://ko-fi.com/xnlh4ck3r) ☕ (I could use the caffeine!)
# ---------------------------------------------------------------------------
# Module-level state shared across the whole tool.
# ---------------------------------------------------------------------------
# Scope handling: domain lists loaded for -sp (--scope-prefix) and -sf (--scope-filter)
inScopePrefixDomains = None
inScopeFilterDomains = None
# Input-type flags: which kind of input was detected/passed on the command line
burpFile = False
zapFile = False
caidoFile = False
stdFile = False
urlPassed = False
dirPassed = False
stdinMultiple = False
stdinFile = []
inputFile = None
# Result accumulators (sets so duplicates are discarded automatically)
linksFound = set()
failedPrefixLinks = set()
linksVisited = set()
paramsFound = set()
wordsFound = set()
lstStopWords = {}
lstPathWords = set()
extraStopWords = ""
contentTypesProcessed = set()
# Exclusion lists, populated from config.yml or the DEFAULT_* fallbacks below
lstExclusions = {}
lstFileExtExclusions = {}
# HTTP request state and counters
requestHeaders = {}
totalRequests = 0
skippedRequests = 0
failedRequests = 0
# Memory usage tracking (set to -1 later if psutil isn't available)
maxMemoryUsage = 0
currentMemUsage = 0
maxMemoryPercent = 0
currentMemPercent = 0
# User-Agent rotation state
currentUAGroup = 0
userAgents = []
userAgent = ""
# Failure counters used to decide when to stop the program early
tooManyRequests = 0
tooManyForbidden = 0
tooManyTimeouts = 0
tooManyConnectionErrors = 0
stopProgramCount = 0
terminalWidth = 120
# waymore integration mode
waymoreMode = False
waymoreFiles = set()
import re
import os
import requests
import argparse
from termcolor import colored
from signal import signal, SIGINT
from sys import exit, stdin
import multiprocessing.dummy as mp
import base64
import xml.etree.ElementTree as etree
import yaml
import subprocess
import random
import math
import enum
from urllib3.exceptions import InsecureRequestWarning
import sys
from urllib.parse import urlparse
from tempfile import NamedTemporaryFile
from datetime import datetime
from bs4 import BeautifulSoup, Comment
import csv
# Try to import lxml to use with beautifulsoup4 instead of the default parser
try:
lxmlInstalled = True
import lxml
except:
lxmlInstalled = False
# Try to import html5lib to use with beautifulsoup4 instead of the default parser
try:
html5libInstalled = True
import html5lib
except:
html5libInstalled = False
startDateTime = datetime.now()
# Try to import psutil to show memory usage
try:
import psutil
except:
currentMemUsage = -1
maxMemoryUsage = -1
currentMemPercent = -1
maxMemoryPercent = -1
# Creating stopProgram enum
class StopProgram(enum.Enum):
    """Reasons the tool may be asked to stop early; stored in global stopProgram and checked throughout processing."""
    SIGINT = 1
    TOO_MANY_REQUESTS = 2
    TOO_MANY_FORBIDDEN = 3
    TOO_MANY_TIMEOUTS = 4
    TOO_MANY_CONNECTION_ERRORS = 5
    MEMORY_THRESHOLD = 6
    MAX_TIME_LIMIT = 7
# Current stop reason (a StopProgram value), or None while running normally
stopProgram = None
# Yaml config values
LINK_EXCLUSIONS = ""
CONTENTTYPE_EXCLUSIONS = ""
FILEEXT_EXCLUSIONS = ""
LINK_REGEX_FILES = ""
# Which response sources are mined for potential parameters (overridden by config.yml)
RESP_PARAM_LINKSFOUND = True
RESP_PARAM_PATHWORDS = True
RESP_PARAM_JSON = True
RESP_PARAM_JSVARS = True
RESP_PARAM_XML = True
RESP_PARAM_INPUTFIELD = True
RESP_PARAM_METANAME = True
WORDS_CONTENT_TYPES = ""
STOP_WORDS = ""
# A comma separated list of Link exclusions used when the exclusions from config.yml cannot be found
# Links are NOT output if they contain these strings. This just applies to the links found in endpoints, not the origin link in which it was found
DEFAULT_LINK_EXCLUSIONS = ".css,.jpg,.jpeg,.png,.svg,.img,.gif,.mp4,.flv,.ogv,.webm,.webp,.mov,.mp3,.m4a,.m4p,.scss,.tif,.tiff,.ttf,.otf,.woff,.woff2,.bmp,.ico,.eot,.htc,.rtf,.swf,.image,w3.org,doubleclick.net,youtube.com,.vue,jquery,bootstrap,font,jsdelivr.net,vimeo.com,pinterest.com,facebook,linkedin,twitter,instagram,google,mozilla.org,jibe.com,schema.org,schemas.microsoft.com,wordpress.org,w.org,wix.com,parastorage.com,whatwg.org,polyfill.io,typekit.net,schemas.openxmlformats.org,openweathermap.org,openoffice.org,reactjs.org,angularjs.org,java.com,purl.org,/image,/img,/css,/wp-json,/wp-content,/wp-includes,/theme,/audio,/captcha,/font,robots.txt,node_modules,.wav,.gltf,.pict,.svgz,.eps,.midi,.mid,.avif"
# A comma separated list of Content-Type exclusions used when the exclusions from config.yml cannot be found
# These content types will NOT be checked
DEFAULT_CONTENTTYPE_EXCLUSIONS = "text/css,image/jpeg,image/jpg,image/png,image/svg+xml,image/gif,image/tiff,image/webp,image/bmp,image/x-icon,image/vnd.microsoft.icon,font/ttf,font/woff,font/woff2,font/x-woff2,font/x-woff,font/otf,audio/mpeg,audio/wav,audio/webm,audio/aac,audio/ogg,audio/wav,audio/webm,video/mp4,video/mpeg,video/webm,video/ogg,video/mp2t,video/webm,video/x-msvideo,application/font-woff,application/font-woff2,application/vnd.android.package-archive,binary/octet-stream,application/octet-stream,application/pdf,application/x-font-ttf,application/x-font-otf,application/x-font-woff,application/vnd.ms-fontobject,image/avif,application/zip,application/x-zip-compressed,application/x-msdownload,application/x-apple-diskimage,application/x-rpm,application/vnd.debian.binary-package"
# A comma separated list of file extension exclusions used when the file ext exclusions from config.yml cannot be found
# In Directory mode, files with these extensions will NOT be checked
DEFAULT_FILEEXT_EXCLUSIONS = ".zip,.dmg,.rpm,.deb,.gz,.tar,.jpg,.jpeg,.png,.svg,.img,.gif,.mp4,.flv,.ogv,.webm,.webp,.mov,.mp3,.m4a,.m4p,.scss,.tif,.tiff,.ttf,.otf,.woff,.woff2,.bmp,.ico,.eot,.htc,.rtf,.swf,.image,.wav,.gltf,.pict,.svgz,.eps,.midi,.mid"
# A list of files used in the Link Finding Regex when the exclusions from config.yml cannot be found.
# These are used in the 5th capturing group that aren't obvious links, but could be files
DEFAULT_LINK_REGEX_FILES = "php|php3|php5|asp|aspx|ashx|cfm|cgi|pl|jsp|jspx|json|js|action|html|xhtml|htm|bak|do|txt|wsdl|wadl|xml|xls|xlsx|bin|conf|config|bz2|bzip2|gzip|tar\.gz|tgz|log|src|zip|js\.map"
# User Agents
UA_DESKTOP = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246",
"Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36 Edg/99.0.1150.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/99.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:105.0) Gecko/20100101 Firefox/105.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12.6; rv:105.0) Gecko/20100101 Firefox/105.0",
"Mozilla/5.0 (X11; Linux i686; rv:105.0) Gecko/20100101 Firefox/105.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:105.0) Gecko/20100101 Firefox/105.0",
"Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:105.0) Gecko/20100101 Firefox/105.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36 Edg/106.0.1370.34",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36 Edg/106.0.1370.34",
"Mozilla/5.0 (Windows NT 10.0; Trident/7.0; rv:11.0) like Gecko"
]
UA_MOBILE_APPLE = [
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/69.0.3497.105 Mobile/15E148 Safari/605.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/13.2b11866 Mobile/16A366 Safari/605.1.15",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A5370a Safari/604.1",
"Mozilla/5.0 (iPhone9,3; U; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1",
"Mozilla/5.0 (iPhone9,4; U; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1",
"Mozilla/5.0 (Apple-iPhone7C2/1202.466; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543 Safari/419.3"
]
UA_MOBILE_ANDROID = [
"Mozilla/5.0 (Linux; Android 8.0.0; SM-G960F Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 7.0; SM-G892A Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/60.0.3112.107 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 7.0; SM-G930VC Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/58.0.3029.83 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 6.0.1; SM-G935S Build/MMB29K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/55.0.2883.91 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 6.0.1; SM-G920V Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 5.1.1; SM-G928X Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 6P Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 7.1.1; G8231 Build/41.2.A.0.219; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/59.0.3071.125 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 6.0.1; E6653 Build/32.2.A.0.253) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 6.0; HTC One X10 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.98 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 6.0; HTC One M9 Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.3",
]
UA_MOBILE_WINDOWS = [
"Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1; Microsoft; RM-1152) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.15254",
"Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; RM-1127_16056) AppleWebKit/537.36(KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.10536",
"Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 950) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Mobile Safari/537.36 Edge/13.1058",
]
UA_MOBILE = UA_MOBILE_APPLE + UA_MOBILE_ANDROID + UA_MOBILE_WINDOWS
UA_TABLET = [
    "Mozilla/5.0 (Linux; Android 7.0; Pixel C Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; SGP771 Build/32.2.A.0.253; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; SHIELD Tablet K1 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/55.0.2883.91 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 7.0; SM-T827R4 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.116 Safari/537.36",
    # BUG FIX: a missing trailing comma on the next entry caused Python's implicit
    # string concatenation to fuse the SM-T550 and KFTHWI user agents into one
    # bogus string, so the list had 5 entries instead of 6
    "Mozilla/5.0 (Linux; Android 5.0.2; SAMSUNG SM-T550 Build/LRX22G) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/3.3 Chrome/38.0.2125.102 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 4.4.3; KFTHWI Build/KTU84M) AppleWebKit/537.36 (KHTML, like Gecko) Silk/47.1.79 like Chrome/47.0.2526.80 Safari/537.36",
]
UA_SETTOPBOXES = [
"Mozilla/5.0 (CrKey armv7l 1.5.16041) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.0 Safari/537.36",
"Roku4640X/DVP-7.70 (297.70E04154A)",
"Mozilla/5.0 (Linux; U; Android 4.2.2; he-il; NEO-X5-116A Build/JDQ39) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30",
"Mozilla/5.0 (Linux; Android 5.1; AFTS Build/LMY47O) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/41.99900.2250.0242 Safari/537.36",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; Nexus Player Build/MMB29T)",
"AppleTV6,2/11.1",
"AppleTV5,3/9.1.1",
]
UA_GAMECONSOLE = [
"Mozilla/5.0 (Nintendo WiiU) AppleWebKit/536.30 (KHTML, like Gecko) NX/3.0.4.2.12 NintendoBrowser/4.3.1.11264.US",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; XBOX_ONE_ED) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
"Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Xbox; Xbox One) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Mobile Safari/537.36 Edge/13.10586",
"Mozilla/5.0 (PlayStation 4 3.11) AppleWebKit/537.73 (KHTML, like Gecko)",
"Mozilla/5.0 (PlayStation Vita 3.61) AppleWebKit/537.73 (KHTML, like Gecko) Silk/3.2",
"Mozilla/5.0 (Nintendo 3DS; U; ; en) Version/1.7412.EU",
]
DEFAULT_WORDS_CONTENT_TYPES = "text/html,application/xml,application/json,text/plain,application/xhtml+xml,application/ld+json,text/xml"
# Default english "stop word" list
# BUG FIX: the string literal was broken across two physical lines (a line-wrap
# artifact), which is a syntax error in Python; rejoined into a single literal.
DEFAULT_STOP_WORDS = "a,aboard,about,above,across,after,afterwards,again,against,all,almost,alone,along,already,also,although,always,am,amid,among,amongst,an,and,another,any,anyhow,anyone,anything,anyway,anywhere,are,around,as,at,back,be,became,because,become,becomes,becoming,been,before,beforehand,behind,being,below,beneath,beside,besides,between,beyond,both,bottom,but,by,can,cannot,cant,con,concerning,considering,could,couldnt,cry,de,describe,despite,do,done,down,due,during,each,eg,eight,either,eleven,else,elsewhere,empty,enough,etc,even,ever,every,everyone,everything,everywhere,except,few,fifteen,fifty,fill,find,fire,first,five,for,former,formerly,forty,found,four,from,full,further,get,give,go,had,has,hasnt,have,he,hence,her,here,hereafter,hereby,herein,hereupon,hers,herself,him,himself,his,how,however,hundred,i,ie,if,in,inc,indeed,inside,interest,into,is,it,its,itself,keep,last,latter,latterly,least,less,like,ltd,made,many,may,me,meanwhile,might,mill,mine,more,moreover,most,mostly,move,much,must,my,myself,name,namely,near,neither,never,nevertheless,next,nine,no,nobody,none,noone,nor,not,nothing,now,nowhere,of,off,often,on,once,one,only,onto,or,other,others,otherwise,our,ours,ourselves,out,outside,over,own,part,past,per,perhaps,please,put,rather,re,regarding,round,same,see,seem,seemed,seeming,seems,serious,several,she,should,show,side,since,sincere,six,sixty,so,some,somehow,someone,something,sometime,sometimes,somewhere,still,such,take,ten,than,that,the,their,them,themselves,then,thence,there,thereafter,thereby,therefore,therein,thereupon,these,they,thick,thin,third,this,those,though,three,through,throughout,thru,thus,to,together,too,top,toward,towards,twelve,twenty,two,un,under,underneath,until,unto,up,upon,us,very,via,want,was,we,well,went,were,weve,what,whatever,when,whence,whenever,where,whereafter,whereas,whereby,wherein,whereupon,wherever,whether,which,while,whilst,whither,whoever,whole,whom,whose,why,will,with,within,without,would,yet,you,youll,your,youre,yours,yourself,yourselves,youve"
def write(text="", pipe=False):
    """
    Write a line of output to stdout.

    Output is suppressed when stdout is being piped to another program,
    unless pipe=True explicitly allows it. Text containing "% Complete"
    belongs to the progress bar, which rewrites the same line, so no
    trailing newline is added for it.
    """
    # isatty() or (not isatty() and pipe) simplifies to isatty() or pipe
    if sys.stdout.isatty() or pipe:
        # Progress-bar updates are written without a newline
        suffix = "" if text.find("% Complete") > 0 else "\n"
        sys.stdout.write(text + suffix)
def writerr(text="", pipe=False):
    """
    Write a message to the terminal: stdout when attached to a TTY,
    stderr when stdout is piped (so piped output stays clean).
    Text containing "% Complete" is progress-bar output, which rewrites
    the same line, so no trailing newline is added for it.
    Note: the pipe parameter is accepted for symmetry with write() but unused.
    """
    # Pick the destination stream once instead of duplicating the logic
    stream = sys.stdout if sys.stdout.isatty() else sys.stderr
    if text.find("% Complete") > 0:
        stream.write(text)
    else:
        stream.write(text + "\n")
def showBanner():
    """Print the multi-coloured ASCII-art banner and the tool version to stdout."""
    write("")
    write(colored(" o o o--o o ", "red"))
    write(colored(" | o | / | o | ", "yellow"))
    write(colored(" \ / o-o | o-o OO O-o o-o o-O o-o o-o ", "green"))
    write(colored(" o | | | | | | | \ | | | | | | |-' | ", "cyan"))
    write(colored(" / \ o o O---o| o o o o o | o o o-o o-o o ", "magenta"))
    write(colored(" | | ", "blue"))
    # The version is read from the installed xnLinkFinder package itself
    write(colored(" ' by @Xnl-h4ck3r ' v" + __import__('xnLinkFinder').__version__))
    write("")
# Functions used when printing messages dependant on verbose options
def verbose():
    """Return whether verbose output was requested (-v or -vv); reads the global args."""
    return args.verbose or args.vverbose
def vverbose():
    """Return whether VERY verbose output was requested (-vv only); reads the global args."""
    return args.vverbose
def includeLink(link):
    """
    Determine if the passed Link should be excluded by checking the list of exclusions
    Returns whether the link should be included
    """
    try:
        global lstExclusions
        include = True
        # Exclude if the finding is an endpoint link but has more than one newline character. This is a false
        # positive that can sometimes be raised by the regex
        # And exclude if the link:
        # - starts with literal characters \n
        # - has any characters that aren't printable
        # - starts with #
        # - start with $
        # - starts with \
        # - has any white space characters in
        # - has any new line characters in
        # - doesn't have any letters or numbers in
        # - if the ascii-only argument was True AND the link contains non ASCII characters
        try:
            if link.count("\n") > 1 or link.startswith("#") or link.startswith("$") or link.startswith("\\"):
                include = False
            if include:
                include = link.isprintable()
            if include:
                include = not (bool(re.search(r"\s", link)))
            if include:
                include = not (bool(re.search(r"\n", link)))
            if include:
                include = bool(re.search(r"[0-9a-zA-Z]", link))
            if include and args.ascii_only:
                include = link.isascii()
        except Exception as e:
            if vverbose():
                writerr("ERROR includeLink 2: " + str(e))
        if include:
            # Go through lstExclusions and see if finding contains any. If not then continue
            # If it fails then try URL encoding and then checking
            linkWithoutQueryString = link.split("?")[0].lower()
            for exc in lstExclusions:
                try:
                    # NOTE(review): str() of a bytes object yields the "b'...'" repr, so the
                    # exclusion is substring-matched against that repr rather than the raw
                    # decoded text; works for plain substrings but worth confirming upstream.
                    if str(linkWithoutQueryString.encode(encoding="ascii",errors="ignore")).find(exc.lower()) >= 0:
                        include = False
                except Exception as e:
                    if vverbose():
                        writerr(
                            colored(
                                "ERROR includeLink 3: Failed to check exclusions for a finding on URL: "
                                + link
                                + " ("
                                + str(e)
                                + ")",
                                "red",
                            )
                        )
        # If the -sf --scope-filter argument is True then a link should only be included if in the scope
        # but ignore any links that just start with a single /
        if not link.startswith("/") or link.startswith("//"):
            if include and args.scope_filter:
                try:
                    include = False
                    if inScopeFilterDomains is None:
                        # A single scope value was passed: escape dots and drop wildcards
                        # NOTE(review): "\." in a non-raw string is a deprecated escape; consider r"\."
                        search = args.scope_filter.replace(".", "\.")
                        search = search.replace("*", "")
                        regexStr = r"^([A-Z,a-z]*)?(:\/\/|//|^)[^\/|?|#]*" + search
                        if re.search(regexStr, link):
                            include = True
                    else:
                        # A file of scope domains was passed: include if the link matches any of them
                        for search in inScopeFilterDomains:
                            search = search.replace(".", "\.")
                            search = search.replace("*", "")
                            search = search.replace("\n", "")
                            if search != "":
                                regexStr = (
                                    r"^([A-Z,a-z]*)?(:\/\/|//|^)[^\/|?|#]*" + search
                                )
                                if re.search(regexStr, link):
                                    include = True
                except Exception as e:
                    if vverbose():
                        writerr(
                            colored(
                                "ERROR includeLink 4: Failed to check scope filter for a checking URL: "
                                + link
                                + " ("
                                + str(e)
                                + ")",
                                "red",
                            )
                        )
    except Exception as e:
        if vverbose():
            writerr(colored("ERROR includeLink 1: " + str(e), "red"))
    return include
def includeFile(fileOrUrl):
    """
    Determine if the passed file name (or URL) should be excluded by checking the list of exclusions
    Returns whether the file should be included
    """
    try:
        global lstFileExtExclusions
        include = True
        # Drop any query string and fragment, and compare case-insensitively
        fileOrUrl = fileOrUrl.split("?")[0].split("#")[0].lower()
        # The file is excluded if its name ends with any excluded extension
        for exclusion in lstFileExtExclusions:
            try:
                include = include and not fileOrUrl.endswith(exclusion.lower())
            except Exception as e:
                if vverbose():
                    writerr(colored("ERROR includeFile 2: Failed to check exclusions for a finding on file/url: " + fileOrUrl + " (" + str(e) + ")", "red"))
    except Exception as e:
        if vverbose():
            writerr(colored("ERROR includeFile 1: " + str(e), "red"))
    return include
def includeContentType(header,url):
    """
    Determine if the content type is in the exclusions
    Returns whether the content type is included

    For Burp/ZAP/Caido input, header is the raw response header text; for live
    requests it is the response headers mapping.
    """
    global burpFile, zapFile, caidoFile
    include = True
    try:
        # Get the content-type from the response
        try:
            if burpFile or zapFile or caidoFile:
                # Raw header text: pull the Content-Type value out with a regex
                contentType = re.findall(
                    r"(?<=Content-Type\:\s)[a-zA-Z\-].+\/[a-zA-Z\-].+?(?=\s|\;)",
                    header,
                    re.IGNORECASE,
                )[0]
            else:
                contentType = header["content-type"]
            # Drop any parameters such as "; charset=utf-8"
            contentType = contentType.split(";")[0]
        except Exception as e:
            # No content-type could be determined
            contentType = ""
        # Check the content-type against the comma separated list of exclusions
        lstExcludeContentType = CONTENTTYPE_EXCLUSIONS.split(",")
        for excludeContentType in lstExcludeContentType:
            if contentType.lower() == excludeContentType.lower():
                include = False
        # If the content type can be included and -vv option was passed, add to the set to display at the end
        if vverbose():
            try:
                if include and contentType != '':
                    contentTypesProcessed.add(contentType)
            except:
                pass
        # If the content type wasn't found, check against file extensions
        if contentType == "":
            # Reduce the URL to just the final path segment (no query/fragment)
            url = url.split("?")[0].split("#")[0].split("/")[-1]
            if url.find(".") > 0:
                include = includeFile(url)
    except Exception as e:
        if vverbose():
            writerr(colored("ERROR includeContentType 1: " + str(e), "red"))
    return include
# Add a link to the list and potential parameters from the link if required
def addLink(link, url, prefixed=False):
    """
    Record a found link in the global linksFound set and mine it for potential
    parameters and path words.

    Args:
        link: the link/endpoint that was found
        url: the origin URL the link was found in (appended when --origin is used)
        prefixed: True when the link was built by applying --scope-prefix
    """
    # Normalise encoded ampersands and equals signs so parameters can be parsed.
    # BUG FIX: the first replacement of each group was previously a no-op
    # ("&" -> "&" and "=" -> "="), almost certainly an HTML-entity unescaping
    # artifact; restored to the entity forms matching the \x/\u variants below.
    link = link.replace("&amp;", "&")
    link = link.replace("\\x26", "&")
    link = link.replace("\\u0026", "&")
    link = link.replace("&equals;", "=")
    link = link.replace("\\x3d", "=")
    link = link.replace("\\u003d", "=")
    # Add the link to the list
    try:
        linkDetail = link
        # Include the origin URL if the --origin argument was passed
        if args.origin:
            linkDetail = linkDetail + " [" + url + "]"
        # Mark links that were built with --scope-prefix
        if prefixed:
            linkDetail = linkDetail + " (PREFIXED)"
        linksFound.add(linkDetail)
    except Exception as e:
        if vverbose():
            writerr(colored("ERROR addLink 1: " + str(e), "red"))
    # Also add any relevant potential parameters
    try:
        # Get words in the URL path to add as potential parameters and words
        if RESP_PARAM_PATHWORDS or args.output_wordlist != "":
            getPathWords(url)
            getPathWords(link)
        # Get parameters from links if requested
        if RESP_PARAM_LINKSFOUND and link.count("?") > 0:
            # Get parameters from the link
            try:
                # Parameter names sit between "?"/"&" and the next "="/"&"/newline
                param_keys = re.finditer(r"(?<=\?|&)[^\=\&\n].*?(?=\=|&|\n)", link)
                for param in param_keys:
                    if param is not None and param.group() != "":
                        # Only add the parameter if argument --ascii-only is False, or if its True and only contains ASCII characters
                        if not args.ascii_only or (args.ascii_only and param.group().strip().isascii()):
                            paramsFound.add(param.group().strip())
            except Exception as e:
                if vverbose():
                    writerr(colored("ERROR addLink 2: " + str(e), "red"))
    except Exception as e:
        if vverbose():
            writerr(colored("ERROR addLink 3 " + str(e), "red"))
def getResponseLinks(response, url):
    """
    Get a list of links found

    Extracts links from a response and records them via addLink(). Handles raw
    Burp/ZAP/Caido request+response text, live requests responses, and plain file
    content in Directory mode. Also picks up sourcemap references from the
    SourceMap / X-SourceMap headers and "//# sourceMappingURL" comments.
    """
    global inScopePrefixDomains, burpFile, zapFile, caidoFile, dirPassed
    try:
        # if the --include argument is True then add the input links to the output too (unless the input was a directory)
        if args.include and not dirPassed:
            addLink(url, url)
        if burpFile or zapFile or caidoFile:
            if burpFile:
                # \r\n\r\n separates the header and body. Get the position of this
                # but if it is 0 then there is no body, so set it to the length of response
                bodyHeaderDivide = response.find("\r\n\r\n")
            else:
                # \n\n separates the header and body. Get the position of this
                # but if it is 0 then there is no body, so set it to the length of response
                bodyHeaderDivide = response.find("\n\n")
            # NOTE(review): find() returns -1 (not 0) when no divider exists — confirm intent
            if bodyHeaderDivide == 0:
                bodyHeaderDivide = len(response)
            header = response[:bodyHeaderDivide]
            # Remove the Status line and content-type from response so we don't mistakenly get "Links" from them
            body = "\n".join(response.split("\n")[1:])
            body = re.sub(r"(?m)^content-type:.*\n", "", body.lower())
            responseUrl = url
        else:
            if dirPassed:
                # Directory mode: the "response" is the raw file content
                body = response
                header = ""
                responseUrl = url
            else:
                # Live request: search headers and body together
                body = str(response.headers) + "\r\n\r\n" + response.text
                header = response.headers
                responseUrl = response.url
        # Some URLs may be displayed in the body within strings that have different encodings of / and : so replace these
        pattern = re.compile("(/|%2f|\\u002f|\\\/)", re.IGNORECASE)
        body = pattern.sub("/", body)
        # NOTE(review): the "\/" alternative here maps to ":" but is dead code because
        # the previous substitution already converted all "\/" sequences to "/"
        pattern = re.compile("(:|%3a|\\u003a|\\\/)", re.IGNORECASE)
        body = pattern.sub(":", body)
        # Replace occurrences of HTML entity &quot; with an actual double quote
        # BUG FIX: this replace was a no-op ('"' -> '"'); restored the &quot; literal
        # that the comment above describes (lost to HTML unescaping at some point)
        body = body.replace('&quot;','"')
        # Take the LINK_REGEX_FILES values and build a string of any values over 4 characters or has a number in it
        # This is used in the 4th capturing group Link Finding regex
        lstFileExt = LINK_REGEX_FILES.split("|")
        LINK_REGEX_NONSTANDARD_FILES = ""
        for ext in lstFileExt:
            if len(ext) > 4 or any(chr.isdigit() for chr in ext):
                if LINK_REGEX_NONSTANDARD_FILES == "":
                    LINK_REGEX_NONSTANDARD_FILES = ext
                else:
                    LINK_REGEX_NONSTANDARD_FILES = (
                        LINK_REGEX_NONSTANDARD_FILES + "|" + ext
                    )
        try:
            # If it is content-type we want to process then carry on, or if a directory was passed (so there is no content type) ensure the filename is not an exclusion
            if (dirPassed and includeFile(url)) or (
                not dirPassed and includeContentType(header,responseUrl)
            ):
                reString = (
                    r"(?:^|\"|'|\\n|\\r|\n|\r|\s?)(((?:[a-zA-Z]{1,10}:\/\/|\/\/)([^\"'\/]{1,}\.[a-zA-Z]{2,}|localhost)[^\"'\n\s]{0,})|((?:\/|\.\.\/|\.\/)[^\"'><,;| *()(%%$^\/\\\[\]][^\"'><,;|()\s]{1,})|([a-zA-Z0-9_\-\/]{1,}\/[a-zA-Z0-9_\-\/]{1,}\.(?:[a-zA-Z]{1,4}"
                    + LINK_REGEX_NONSTANDARD_FILES
                    + ")(?:[\?|\/][^\"|']{0,}|))|([a-zA-Z0-9_\-]{1,}\.(?:"
                    + LINK_REGEX_FILES
                    + ")(?:\?[^\"|^']{0,}|)))(?:\"|'|\\n|\\r|\n|\r|\s|$)|(?<=^Disallow:\s)[^\$\n]*|(?<=^Allow:\s)[^\$\n]*|(?<= Domain\=)[^\";']*|(?<=\<)https?:\/\/[^>\n]*|(?<=\=)\s*\/[0-9a-zA-Z]+[^>\n]*"
                )
                link_keys = re.finditer(reString, body, re.IGNORECASE)
                for key in link_keys:
                    if key is not None and key.group() != "" and len(key.group()) > 2:
                        link = key.group()
                        link = link.strip("\"'\n\r( ")
                        link = link.replace("\\n", "")
                        link = link.replace("\\r", "")
                        link = link.replace("\\.",".")
                        try:
                            # Strip one layer of surrounding quotes / newline escape sequences
                            first = link[:1]
                            last = link[-1]
                            firstTwo = link[:2]
                            # BUG FIX: was link[-2] (a single character), which could never
                            # equal the two-character strings "\\n"/"\\r" compared below
                            lastTwo = link[-2:]
                            if (
                                first == '"'
                                or first == "'"
                                or first == "\n"
                                or first == "\r"
                                or firstTwo == "\\n"
                                or firstTwo == "\\r"
                            ) and (
                                last == '"'
                                or last == "'"
                                or last == "\n"
                                or last == "\r"
                                or lastTwo == "\\n"
                                or lastTwo == "\\r"
                            ):
                                if firstTwo == "\\n" or firstTwo == "\\r":
                                    start = 2
                                else:
                                    start = 1
                                if lastTwo == "\\n" or lastTwo == "\\r":
                                    end = 2
                                else:
                                    end = 1
                                link = link[start:-end]
                            # If there are any trailing back slashes, comma, ; or >; remove them all
                            link = link.rstrip("\\")
                            link = link.rstrip(">;")
                            link = link.rstrip(";")
                            link = link.rstrip(",")
                            # If there are any backticks in the URL, remove everything from the backtick onwards
                            link = link.split("`")[0]
                            # If there are any closing brackets of any kind without an opening bracket, remove everything from the closing bracket onwards
                            if re.search(r"^[^(]*\)*$",link):
                                link = link.split(")", 1)[0]
                            if re.search(r"^[^{}]*\}*$",link):
                                link = link.split("}", 1)[0]
                            # BUG FIX: the class was "[^\[]]" (one non-"[" char then literal "]"s),
                            # which could never match a whole link; corrected to mirror the "(" case
                            if re.search(r"^[^\[]*\]*$",link):
                                link = link.split("]", 1)[0]
                            # If there is a </ in the link then strip from that forward
                            if re.search(r"<\/", link):
                                link = link.split("</", 1)[0]
                        except Exception as e:
                            if vverbose():
                                writerr(colored(getSPACER("ERROR getResponseLinks 2: " + str(e)), "red"))
                        # If the link starts with a . and the 2nd character is not a . or / then remove the first .
                        if link[0] == "." and link[1] != "." and link[1] != "/":
                            link = link[1:]
                        # Only add the finding if it should be included
                        if includeLink(link):
                            # If the link found is for a .js.map file then put the full .map URL in the list
                            if link.find("//# sourceMappingURL") >= 0:
                                # Get .map link after the =
                                firstpos = link.rfind("=")
                                lastpos = link.find("\n")
                                if lastpos <= 0:
                                    lastpos = len(link)
                                mapFile = link[firstpos + 1 : lastpos]
                                # Get the responseurl up to last /
                                lastpos = responseUrl.rfind("/")
                                mapPath = responseUrl[0 : lastpos + 1]
                                # Add them to get link of js.map and add to list
                                link = mapPath + mapFile
                                link = link.replace("\n", "")
                            # If a link starts with // then add http:
                            if link.startswith("//"):
                                link = "http:" + link
                            # If the -sp (--scope-prefix) option was passed and the link doesn't start with any type of schema
                            if (
                                args.scope_prefix is not None
                                and re.match(r"^[a-z0-9\-]{2,}\:\/\/", link.lower()) is None
                            ):
                                # If -spo is passed, then add the original link
                                if args.scope_prefix_original:
                                    addLink(link, responseUrl)
                                # If the -sp (--scope-prefix) option is a name of a file, then add a link for each scope domain
                                if inScopePrefixDomains is not None:
                                    count = 0
                                    processLink = True
                                    for domain in inScopePrefixDomains:
                                        # Get the domain without a schema
                                        domainTest = link
                                        if domainTest.find("//") >= 0:
                                            domainTest = domainTest.split("//")[1]
                                        # Get the prefix without a schema
                                        prefixTest = domain
                                        if prefixTest.find("//") >= 0:
                                            prefixTest = prefixTest.split("//")[1]
                                        # If the link doesn't start with the domain or prefix then carry on
                                        if not link.lower().startswith(
                                            domainTest
                                        ) and not link.lower().startswith(prefixTest):
                                            processLink = False
                                    if processLink:
                                        # If the link doesn't start with a / and doesn't start with http then prefix it with a / before we prefix with the -sp (--scope-prefix)
                                        if not link.startswith(
                                            "/"
                                        ) and not link.lower().startswith("http"):
                                            link = "/" + link
                                        for domain in inScopePrefixDomains:
                                            count += 1
                                            prefix = "{}".format(domain.strip())
                                            if prefix != "":
                                                addLink(prefix + link, responseUrl, True)
                                else: # else just prefix with the -sp value
                                    prefix = args.scope_prefix
                                    # Get the prefix without a schema
                                    prefixTest = args.scope_prefix
                                    if prefixTest.find("//") >= 0:
                                        prefixTest = prefixTest.split("//")[1]
                                    # Get the domain without a schema
                                    domainTest = args.input
                                    if domainTest.find("//") >= 0:
                                        domainTest = domainTest.split("//")[1]
                                    # If the link doesn't start with the domain or prefix then carry on
                                    if not link.lower().startswith(
                                        domainTest
                                    ) and not link.lower().startswith(prefixTest):
                                        # If the link doesn't start with a / and doesn't start with http, then prefix it with a / before we prefix with the -sp (--scope-prefix)
                                        if not link.startswith(
                                            "/"
                                        ) and not link.lower().startswith("http"):
                                            link = "/" + link
                                        if not prefix.lower().startswith("http"):
                                            prefix = "http://" + prefix
                                        addLink(prefix + link, responseUrl, True)
                            else:
                                addLink(link, responseUrl)
        except Exception as e:
            if vverbose():
                writerr(colored(getSPACER("ERROR getResponseLinks 3: " + str(e)), "red"))
        # Also add a link of a js.map file if the X-SourceMap or SourceMap header exists
        if not dirPassed:
            try:
                # See if the SourceMap header exists
                try:
                    if burpFile or zapFile or caidoFile:
                        mapFile = re.findall(
                            r"(?<=SourceMap\:\s).*?(?=\n)", header, re.IGNORECASE
                        )[0]
                    else:
                        mapFile = header["sourcemap"]
                except:
                    mapFile = ""
                # If not found, try the deprecated X-SourceMap header
                # BUG FIX: the condition was inverted ('!= ""'), which overwrote a found
                # SourceMap value and skipped the fallback when it was actually needed
                if mapFile == "":
                    try:
                        if burpFile or zapFile or caidoFile:
                            mapFile = re.findall(
                                r"(?<=X-SourceMap\:\s).*?(?=\n)", header, re.IGNORECASE
                            )[0]
                        else:
                            mapFile = header["x-sourcemap"]
                    except:
                        mapFile = ""
                # If a map file was found in the response, then add a link for it
                # BUG FIX: addLink() requires the origin url argument; calling it with
                # only mapFile raised a TypeError that the except below swallowed
                if mapFile != "":
                    addLink(mapFile, responseUrl)
            except Exception as e:
                if vverbose():
                    writerr(colored(getSPACER("ERROR getResponseLinks 4: " + str(e)), "red"))
    except Exception as e:
        if vverbose():
            writerr(colored(getSPACER("ERROR getResponseLinks 1: " + str(e)), "red"))
def handler(signal_received, frame):
    """
    Signal handler called when the user presses Ctrl-C (SIGINT).
    An attempt will be made to try and clean up properly.

    On the first Ctrl-C, set the global stopProgram flag so the main loop
    can save the data gathered so far and end gracefully. Repeated Ctrl-C
    presses while the shutdown is in progress just produce increasingly
    exasperated warnings; the third repeat exits immediately without saving.

    Args:
        signal_received: Signal number (unused; required by the signal API).
        frame: Current stack frame (unused; required by the signal API).
    """
    global stopProgram, stopProgramCount
    if stopProgram is not None:
        # Already shutting down - the user pressed Ctrl-C again
        stopProgramCount = stopProgramCount + 1
        if stopProgramCount == 1:
            writerr(
                colored(
                    getSPACER(
                        ">>> Please be patient... Trying to save data and end gracefully!"
                    ),
                    "red",
                )
            )
        elif stopProgramCount == 2:
            writerr(
                colored(
                    getSPACER(">>> SERIOUSLY... YOU DON'T WANT YOUR DATA SAVED?!"),
                    "red",
                )
            )
        elif stopProgramCount == 3:
            # Third repeat: give up on the graceful save and exit now.
            # NOTE: "\\_" (not "\_") so the shrug emoticon renders identically
            # without an invalid-escape SyntaxWarning on Python 3.12+.
            writerr(
                colored(
                    getSPACER(">>> Patience isn't your strong suit eh? ¯\\_(ツ)_/¯"),
                    "red",
                )
            )
            sys.exit()
    else:
        # First Ctrl-C: flag the main loop to stop and save data
        stopProgram = StopProgram.SIGINT
        writerr(
            colored(
                getSPACER('>>> "Oh my God, they killed Kenny... and xnLinkFinder!" - Kyle'),
                "red",
            )
        )
        writerr(
            colored(
                getSPACER(">>> Attempting to rescue any data gathered so far..."), "red"
            )
        )
def getMemory():
    """
    Sample current process memory usage (RSS) and system memory percentage,
    update the running maximums, and flag the program to stop if the system
    percentage exceeds the --memory-threshold argument. When no memory stop
    was triggered, also check whether the maximum run time has been reached.
    """
    global currentMemUsage, currentMemPercent, maxMemoryUsage, maxMemoryPercent, stopProgram
    currentMemUsage = process.memory_info().rss
    currentMemPercent = math.ceil(psutil.virtual_memory().percent)
    # Track the high-water marks for the end-of-run summary
    maxMemoryUsage = max(maxMemoryUsage, currentMemUsage)
    maxMemoryPercent = max(maxMemoryPercent, currentMemPercent)
    # Request a graceful stop once the memory threshold is breached
    if currentMemPercent > args.memory_threshold:
        stopProgram = StopProgram.MEMORY_THRESHOLD
    # If memory limit hasn't been reached, check the max time limit
    if stopProgram is None:
        checkMaxTimeLimit()
def shouldMakeRequest(url):
    """
    Decide whether the given URL should actually be requested.

    A URL is only requested when it hasn't been visited before, isn't blank,
    and doesn't start with a "." or a single "/" (a protocol-relative "//"
    prefix is still acceptable).
    """
    # Reject anything already visited, blank, or starting with a "."
    if url in linksVisited or url == "" or url.startswith("."):
        return False
    # Accept non-rooted URLs and protocol-relative ("//...") URLs
    return not url.startswith("/") or url.startswith("//")
def processUrl(url):
global burpFile, zapFile, caidoFile, totalRequests, skippedRequests, failedRequests, userAgent, requestHeaders, tooManyRequests, tooManyForbidden, tooManyTimeouts, tooManyConnectionErrors, stopProgram, waymoreMode, stopProgram, failedPrefixLinks
# Choose a random user agent string to use from the current group
userAgent = random.choice(userAgents[currentUAGroup])
requestHeaders["User-Agent"] = userAgent
try:
# If waymore Mode then the url maybe from index.txt get the source URL from the line
if waymoreMode and args.input.endswith("index.txt") :
values = url.split(",")
archiveUrl = values[1]
index = archiveUrl.index("http",5)
url = archiveUrl[index:]
except Exception as e:
pass
try:
# Check if the URL was prefixed and remove the tag
originalUrl = url
prefixed = False
failedPrefix = False
prefix = " (PREFIXED)"
if url.find(prefix) > 0:
prefixed = True
url = url.replace(prefix,"")
url = url.strip().rstrip("\n")
# If the url has the origin at the end (.e.g [...]) then strip it off before processing
if url.find("[") > 0:
url = str(url[0 : url.find("[") - 2])
# If the url has *. in it, remove that before we try to request it
url = url.replace("*.","")
# If we should make the current request
if shouldMakeRequest(url):
# Add the url to the list of visited URls so we don't visit again
# Don't do this for Burp, ZAP or Caido files as they can be huge, or for file names in directory mode
if not burpFile and not zapFile and not caidoFile and not dirPassed:
linksVisited.add(url)
# Get memory usage every 25 requests
if totalRequests % 25 == 0:
try:
getMemory()
except:
pass
# Get response from url
if stopProgram is None:
try:
requestUrl = url
if not url.lower().startswith("http"):
requestUrl = "http://" + url
# If the --replay-proxy argument was passed, try to use it
if args.replay_proxy != "":
proxies = {
"http": args.replay_proxy,
"https": args.replay_proxy,
}
verify = False
else:
proxies = {}
verify = not args.insecure
# Suppress insecure request warnings if using insecure mode
if not verify:
requests.packages.urllib3.disable_warnings(
category=InsecureRequestWarning
)
# Make the request
resp = requests.get(
requestUrl,
headers=requestHeaders,
timeout=args.timeout,
allow_redirects=True,
verify=verify,
proxies=proxies,
)
# If the replay proxy is being used, and the title in the response contains "Burp Suite" and has an error of "Unknown Host" then set the response code to 504. This is because if Burp is used for a proxy, it returns 200 because the response is the error from Burp.
if args.replay_proxy and resp.text.find('<title>Burp Suite') > 0:
if resp.text.find('Unknown host') > 0:
resp.status_code = 504
else:
if os.environ.get('USER') == 'xnl':
try:
writerr(colored(getSPACER('Burp Response - Code: '+str(resp.status_code)+'\nResp: ' + resp.text), 'yellow'))
except:
pass
if resp.status_code == 200:
if verbose():
msg = "Response " + str(resp.status_code) + ": " + url
if prefixed:
msg = msg + prefix
write(colored(msg,"green"))
else:
if verbose():
msg = "Response " + str(resp.status_code) + ": " + url
if prefixed:
msg = msg + prefix
write(colored(msg,"yellow"))
# If argument -s429 was passed, keep a count of "429 Too Many Requests" and stop the program if > 95% of responses have status 429, but only if at least 10 requests have already been made
if args.s429 and resp.status_code == 429:
tooManyRequests = tooManyRequests + 1
try:
if (tooManyRequests / totalRequests * 100) > 95 and totalRequests > 10:
stopProgram = StopProgram.TOO_MANY_REQUESTS
except:
pass
# If argument -s403 was passed, keep a count of "403 Forbidden" and stop the program if > 95% of responses have status 403, but only if at least 10 requests have already been made
if args.s403 and resp.status_code == 403:
tooManyForbidden = tooManyForbidden + 1
try:
if (tooManyForbidden / totalRequests * 100) > 95 and totalRequests > 10:
stopProgram = StopProgram.TOO_MANY_FORBIDDEN
except:
pass
# If the -spkf wasn't passed, the response was 404, and the URL was prefixed, flag it as a failed link, else get links and parameters from the response
if not args.scope_prefix_keep_failed and prefixed and resp.status_code == 404:
failedPrefix = True
else:
# Get potential links from the response
getResponseLinks(resp, url)
totalRequests = totalRequests + 1
# Get potential parameters from the response
getResponseParams(resp, url)
except requests.exceptions.ProxyError as pe:
writerr(
colored(
"Cannot connect to the proxy " + args.replay_proxy, "red"
)
)
pass
except requests.exceptions.ConnectionError as errc:
failedRequests = failedRequests + 1
if verbose():
# Check for certificate verification failure and suggest using -insecure