Compare commits


No commits in common. "5f31ab9fcc4617a2ab0fa1b239d394a31398c322" and "eca7bed6f81f0c1a99da9c80ca1da81cd341c974" have entirely different histories.

13 changed files with 28 additions and 53 deletions

11 binary image files removed (contents not shown). Former sizes: 248 KiB, 272 KiB, 408 KiB, 344 KiB, 376 KiB, 408 KiB, 920 KiB, 24 KiB, 24 KiB, 24 KiB, and 56 KiB.

Changed file: the scraper script (filename not shown in this view)

@@ -10,23 +10,12 @@ from os import mkdir, listdir
from re import findall
from time import sleep
from threading import Thread
'''
############## 4chan thread scraper ################
This script is designed to look for specific words on 4chan boards and download all images from the relevant threads.
Usage of a VPN is recommended since 4chan is a shady place. Use at your own risk!
. \\
,` ( ` SquiP
( \' "
`-.__)_
Here we look for particular threads on 4chan and download their images.
'''
def getArgs():
'''Gets all the arguments passed to the script and returns them in a parse_args()-type object.
No args
@@ -40,6 +29,7 @@ def getArgs():
parser.add_argument("-f", "--folder", help = "Folder in which downloads will go, ex: ./downloads", action="store", type=str, required=True)
parser.add_argument("-k", "--keyword", help = "keyword or phrase to look for in the threads, ex : 'cute thread'", action="store", type=str, required=True)
parser.add_argument("-c", "--constant", help = "Constantly download", action="store_true")
parser.add_argument("-t", "--threads", help = "Number of threads in case of constant run, defaults to 2", action="store", type=int, required=False)
#Creating the args object
args=parser.parse_args()
@@ -115,60 +105,35 @@ def item_dl(sources, dlfolder):
'''
Download all items in the sources list to folder dlfolder, which we try to create.
Args:
- sources : a list of URLsi
- global folder_content : see folder_watch()
- sources : a list of URLs
'''
global folder_content
try:
#Making folder
try:
mkdir(dlfolder)
except FileExistsError:
print(f"{dlfolder} already exists, not creating")
#Deduplicating
imagenames = []
dir_content = listdir(dlfolder)
for source in sources:
fullsource = "http://" + source
imagename = findall(r"[^\/]*$", source)[0]
if imagename[:-4] not in folder_content:
if imagename[:-4] not in str(dir_content):
name = wget.download(fullsource, out=dlfolder)
print(f"{name} downloaded")
return True
def folder_watch(folder):
'''
Watch for the content of a folder and return its content.
Content is a string containing all the names of all the elements.
Args:
- folder : folder to watch
- global folder_content : see folder_watch()
Returns:
folder_content : said string, containing all the names of all the files in the folder
'''
global folder_content
folder_list = listdir(folder)
folder_content = ""
for i in folder_list:
folder_content += i
def dl_threads(folder, url):
def constant_dl(folder, url):
'''
Constantly download...
Args:
- folder: folder to dl into
- url : board to watch
'''
try:
while True:
sleep(2)
soup = html_get(url)
hrefs = thread_finder(soup, keyword)
@@ -182,10 +147,9 @@ def dl_threads(folder, url):
print(f"going after {url}{href}")
subsoup = html_get(f"{url}{href}")
subsources = scraper(subsoup)
folder_watch(folder)
print(subsources)
item_dl(subsources, folder)
except Exception as e:
print(f"Houston, we had a problem: \n{e}")
@@ -193,12 +157,15 @@ def dl_threads(folder, url):
args = getArgs()
folder = args.folder
keyword = args.keyword
if args.threads:
threadnumber = args.threads
else:
threadnumber = 2
url = args.url
soup = html_get(url)
hrefs = thread_finder(soup, keyword)
sources = scraper(soup)
folder_content = ""
#item_dl(sources, folder)
#Dling all threads found
@@ -206,11 +173,16 @@ folder_content = ""
#oneshot
if not args.constant:
for href in hrefs:
folder_watch(folder)
dl_threads(folder_url)
print(f"going after {url}{href}")
subsoup = html_get(f"{url}{href}")
subsources = scraper(subsoup)
print(subsources)
item_dl(subsources, folder)
else:
while True:
folder_watch(folder)
dl_threads(folder, url)
thread_objects = []
for i in range (1, threadnumber):
thread_objects.append(Thread(target=constant_dl, args=(folder, url)))
for thread in thread_objects:
thread.start()
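
Worth noting on the new threaded entry point: `range(1, threadnumber)` yields `threadnumber - 1` values, which matches the "-t 2 gives one thread" issue listed in the README Todo below. A minimal sketch of a spawn loop that launches exactly the requested number of workers, reusing the script's `constant_dl` (stubbed here so the snippet stands alone):

```python
from threading import Thread

def constant_dl(folder, url):
    # Stub standing in for the script's constant_dl worker loop.
    ...

def spawn_workers(threadnumber, folder, url):
    # range(threadnumber) yields exactly threadnumber indices (0..threadnumber-1),
    # unlike range(1, threadnumber), which comes up one worker short.
    workers = [Thread(target=constant_dl, args=(folder, url))
               for _ in range(threadnumber)]
    for worker in workers:
        worker.start()
    return workers
```

Even with the count fixed, the workers share one folder with no coordination, so the duplicate downloads the Todo mentions would still need something like a lock-protected shared set of already-downloaded names.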

Changed file: README (filename not shown in this view)

@@ -46,8 +46,11 @@ Use (constant, multi-threaded):
* -f : folder where you want to download all pictures
* -k : keyword or keyphrase to search (better use a single word !)
* -c : enables constant downloading
* -t 3 : number of threads; here, 3 threads keep running to download constantly
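
For example, a full invocation might look like this (the script filename and the `-u`/`--url` flag are assumptions based on `args.url` in the code; substitute the actual entry point):

```
python scraper.py -u https://boards.4chan.org/wg/ -f ./downloads -k "cute thread" -c -t 3
```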
## Todo
* Filter by filetype
* Multi-threading not really working: -t 2 gives only one thread, and many threads cause duplicate downloads
* Use a try / except when downloading, since some threads go 404 and crash the script
* Make a pretty website with some keywords running in the background, making for some nice public folders (wallpapers...)
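
For the try / except item above, a minimal sketch, assuming `wget.download` surfaces dead threads as `urllib.error.HTTPError` (it delegates to urllib under the hood):

```python
import wget
from urllib.error import HTTPError

def safe_download(fullsource, dlfolder):
    # A 404'd thread should skip the file, not crash the whole run.
    try:
        name = wget.download(fullsource, out=dlfolder)
        print(f"{name} downloaded")
        return name
    except HTTPError as e:
        print(f"skipping {fullsource}: {e}")
        return None
```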