NaiJi ✨ 2024-04-09 00:38:52 +04:00
commit 8a43fd1050
28 changed files with 3273 additions and 0 deletions

README.md Normal file

@ -0,0 +1,3 @@
# Fediverse Bots
I used to be on [Fediverse](https://fediverse.party/en/fediverse/) a lot, mainly [Pleroma](https://pleroma.social/). During that time I created a bunch of dumb bots for fun and basic Python/shell script practice. Install each bot according to its own README; if a bot has no README, just create a `token.dat` file and put the bot's account token in it, that's it.
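For example, a minimal `token.dat` setup might look like this (the token value is a placeholder, obviously):
```bash
# Put the bot account's access token into token.dat, nothing else
echo 'YOUR_BOT_ACCESS_TOKEN' > token.dat
chmod 600 token.dat  # the token is a secret, keep it private
```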

maid-bot/.gitignore vendored Normal file

@ -0,0 +1,3 @@
token.dat
venv
*.log

maid-bot/README.md Normal file

@ -0,0 +1,20 @@
# maid-bot
Another bot for posting art to your fediverse timeline.
### Initial setup
* Create a sources.dat file and fill it with pairs of URLs:
```
source_url_to_wget hyperlink_url_to_put_in_post_description
source_url_to_wget hyperlink_url_to_put_in_post_description
source_url_to_wget hyperlink_url_to_put_in_post_description
source_url_to_wget hyperlink_url_to_put_in_post_description
. . . . . . . . . . etc
```
* Fill token.dat with bot's auth token.
* Edit post.sh and write your custom text in the "status" field of the POST JSON.
* Now put it in your crontab! (see the sketch below)
The bot will take a random pair of URLs from the file, download the image from the first URL and post it to fedi, attaching the second URL as the description.
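A crontab sketch of what that might look like (the path and schedule here are placeholders, adjust to your setup):
```bash
# Hypothetical crontab entry: post every 6 hours from the bot's directory
0 */6 * * * cd /home/user/maid-bot && ./post.sh >> post.log 2>&1
```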

maid-bot/post.sh Normal file

@ -0,0 +1,49 @@
#!/bin/bash
# Check dependencies and required files before doing anything
if [ -z "$(which jq)" ]
then
    echo "Missing jq package, please install"
    exit 1
fi
token_dat="./token.dat"
if [ ! -f "$token_dat" ]
then
    echo "Missing ./token.dat"
    exit 1
fi
sources_dat="./sources.dat"
if [ ! -f "$sources_dat" ]
then
    echo "Missing ./sources.dat"
    exit 1
fi
api_base_url="https://your.site"
access_token="$(cat ${token_dat})"
# Pick a random "source_url hyperlink_url" pair from sources.dat
pair_to_post=($(shuf -n 1 "${sources_dat}"))
source_url=${pair_to_post[0]}
hyperlink_url=${pair_to_post[1]}
# Download the image into a scratch directory
mkdir -p source
cd source || exit 1
wget "${source_url}" 2> /dev/null || { echo "Error with ${source_url}"; exit 1; }
# Upload the image and extract the media id from the JSON response
media_json=$(curl -X POST "${api_base_url}/api/v1/media" \
    -H "Authorization: Bearer ${access_token}" \
    -F "file=@$(ls *)")
media_id=$(jq -r ".id" <<< "${media_json}")
echo "$media_json"
cd ..
rm -rf source/
# Post the status with the uploaded media attached
curl -X POST -d '{"status":" :azrn_shiratsuyu_maid: '"${hyperlink_url}"'", "visibility":"unlisted", "media_ids":["'"${media_id}"'"]}' \
    -H "Authorization: Bearer ${access_token}" \
    -H "Content-Type: application/json" \
    "${api_base_url}/api/v1/statuses"

maritalk-bot/.gitignore vendored Normal file

@ -0,0 +1 @@
token.dat

maritalk-bot/post.py Executable file

@ -0,0 +1,53 @@
#!/home/naiji/mastodon/maritalk-bot/venv/bin/python
import sys
import numpy as np
from mastodon import Mastodon
# --------------------------------------------------
def main():
    mastodon = Mastodon(
        access_token = 'token.dat',
        api_base_url = 'https://your.site/'
    )
    text = open('source.dat', encoding='utf8').read()
    # Markov-chain text generation, based on the original implementation:
    # https://gist.github.com/bpshaver/840d53222b72d2612ddf6702ef020274#file-markov_text_sim-py
    source = text.split()
    # Yield every pair of adjacent words in the corpus
    def make_pairs(source):
        for i in range(len(source) - 1):
            yield (source[i], source[i + 1])
    pairs = make_pairs(source)
    # Map each word to the list of words that follow it in the corpus
    word_dict = {}
    for left_w, right_w in pairs:
        if left_w in word_dict:
            word_dict[left_w].append(right_w)
        else:
            word_dict[left_w] = [right_w]
    # Start the chain from a random capitalized word
    first_word = np.random.choice(source)
    while first_word.islower():
        first_word = np.random.choice(source)
    chain = [first_word]
    # Walk the chain until a word ends a sentence
    ch = ''
    while not ch.endswith('.'):
        if chain[-1] not in word_dict:  # dead end: the last word has no recorded successor
            break
        ch = np.random.choice(word_dict[chain[-1]])
        chain.append(ch)
    toot = ' '.join(chain)
    mastodon.status_post(toot, media_ids=None, visibility='unlisted')
if __name__ == '__main__':
    sys.exit(main())
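For intuition, here is a tiny standalone sketch (not part of the repo) of the successor table the script builds before walking the chain:
```python
# Toy corpus: map each word to the words that follow it
corpus = "The cat sat. The cat ran.".split()
word_dict = {}
for left_w, right_w in zip(corpus, corpus[1:]):
    word_dict.setdefault(left_w, []).append(right_w)
print(word_dict)
# {'The': ['cat', 'cat'], 'cat': ['sat.', 'ran.'], 'sat.': ['The']}
```
The generator then repeatedly picks a random successor of the last word until it lands on a word ending with '.'.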

maritalk-bot/requirements.txt Normal file

@ -0,0 +1,38 @@
acme==1.1.0
certbot==0.40.0
certbot-nginx==0.40.0
certifi==2019.11.28
chardet==3.0.4
ConfigArgParse==0.13.0
configobj==5.0.6
cryptography==2.8
dbus-python==1.2.16
distro==1.4.0
distro-info===0.23ubuntu1
future==0.18.2
idna==2.8
josepy==1.2.0
mock==3.0.5
netifaces==0.10.4
parsedatetime==2.4
pbr==5.4.5
PyGObject==3.36.0
pymacaroons==0.13.0
PyNaCl==1.3.0
pyOpenSSL==19.0.0
pyparsing==2.4.6
pyRFC3339==1.1
python-apt==2.0.0+ubuntu0.20.4.1
python-debian===0.1.36ubuntu1
pytz==2019.3
PyYAML==5.3.1
requests==2.22.0
requests-toolbelt==0.8.0
six==1.14.0
ubuntu-advantage-tools==20.3
ufw==0.36
urllib3==1.25.8
zope.component==4.3.0
zope.event==4.4
zope.hookable==5.0.0
zope.interface==4.7.1

maritalk-bot/runner Normal file

@ -0,0 +1,4 @@
#!/bin/bash
source venv/bin/activate
python3 post.py
deactivate

maritalk-bot/source.dat Normal file

File diff suppressed because one or more lines are too long

morrowsay-bot/README.md Normal file

@ -0,0 +1,16 @@
# morrowsay-bot
A dumb bot to post random generic quotes to your fediverse timeline!
### How to run ###
* For posting you can use the quotes already saved in the repository. If you want "your own", you can...
* ...download the quotes:
```bash
python3 loadquotes.py
```
* To make a single post with a random quote from the downloaded file, run `post.py`
* Don't forget to set up your bot account, get access tokens and establish the environment around it. I am not providing instructions for that, since the steps differ depending on what, where and how you want to run the bot.
* That's all!
![alt text](https://i.imgur.com/3ao6VLt.png)

morrowsay-bot/loadquotes.py Normal file

@ -0,0 +1,30 @@
import sys
import requests
from bs4 import BeautifulSoup
# --------------------------------------------------
URL = "https://elderscrolls.fandom.com/wiki/Generic_Dialogue_(Morrowind)"
def main():
    print('REQUEST ' + URL)
    resp = requests.get(URL)
    soup = BeautifulSoup(resp.text, 'lxml')
    print('DOWNLOADING')
    # Every quote on the wiki page is wrapped in an <i> tag
    res = [icont.string for icont in soup('i') if icont.string is not None]
    with open('quotes.dat', 'w', encoding='utf-8') as file:
        for quote in res:
            print(quote, file=file)
    print('YAY SUCCESS!\n\n! ! ! Now don\'t forget to run replacevars.py ! ! !')
if __name__ == '__main__':
    sys.exit(main())
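After a successful run, quotes.dat holds one quote per line, still containing the raw [macros] (these examples are taken from the VARIABLES table in replacevars.py below):
```
"What do you want, [PC Rank]?"
"I don't like you, [PC Race]. You better stay out of this."
```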

morrowsay-bot/post.py Normal file

@ -0,0 +1,29 @@
import sys
import random
from mastodon import Mastodon
# --------------------------------------------------
def main():
    mastodon = Mastodon(
        access_token = 'token.dat',
        api_base_url = 'https://your.site/'
    )
    with open('quotes.dat', 'r', encoding='utf-8') as file:
        data = file.readlines()
    # random.choice avoids the off-by-one IndexError of randint(0, len(data))
    toot = random.choice(data)
    mastodon.status_post(toot, media_ids=None, visibility='unlisted')
if __name__ == '__main__':
    sys.exit(main())

morrowsay-bot/quotes.dat Normal file

File diff suppressed because it is too large

morrowsay-bot/replacevars.py Normal file

@ -0,0 +1,64 @@
import sys
# --------------------------------------------------
VARIABLES = {
    '[PC Guild Rank]\nLike in \"[PC Guild Rank], what can be said. We are the few, the proud, the ... under-paid! HarHarHar!\"':
        '[PC Guild Rank]',
    '[Speaker\'s Name]\nLike in \"But perhaps [Speaker\'s Name] should not speak about this in public. And who would listen, anyway?\"':
        '[Speaker\'s Name]',
    '[PC Name]\nLike in \"[PC Name]. Once before, Lord Nerevar thought Dagoth Ur and all his kin were dead.\"':
        '[PC Name]',
    '[PC Race]\nLike in \"I don\'t like you, [PC Race]. You better stay out of this.\"':
        '[PC Race]',
    '[PC Rank]\nLike in \"What do you want, [PC Rank]?\"':
        '[PC Rank]',
    '[Next PC Rank]\nLike in "You are now [PC Name] the [Next PC Rank] in the Fighters Guild.\"':
        '[Next PC Rank]',
    '[Faction]\nLike in \"The [Faction] can forgive your actions this one time only, [PC Rank] [PC Name].\"':
        '[Faction]',
    '[Rank]\nLike in \"I\'ve risen to [Rank] rank in this outfit by being smart and careful.\"':
        '[Rank]',
    '[Name]\nLike in \"Hello, [PC Name]. I\'m [Name], and I\'m here to clean out this den of Daedra worshippers.\"':
        '[Name]'
}
# Replaces [macros] with strings you want
def main():
    with open('quotes.dat', 'r', encoding='utf-8') as file:
        quotes = file.readlines()
    if input('Do you want to remove broken strings? y/n ') == 'y':
        # Filter instead of calling remove() while iterating, which skips entries
        quotes = [q for q in quotes
                  if q != 'The Elder Scrolls III: Morrowind\n'
                  and len(q) >= 5
                  and q.strip().startswith('"')
                  and q.strip().endswith('"')]
    replacer = ''
    for to_print, to_replace in VARIABLES.items():
        while not replacer.strip():
            replacer = input('\n' + to_print + '\nInput what you want to replace ' + to_replace + ' with: ')
        for i, quote in enumerate(quotes):
            quotes[i] = quote.replace(to_replace, replacer)
        replacer = ''
    with open('quotes.dat', 'w', encoding='utf-8') as file:
        for quote in quotes:
            print(quote, file=file, end='')
if __name__ == '__main__':
    sys.exit(main())

morrowsay-bot/requirements.txt Normal file

@ -0,0 +1,19 @@
requests==2.22.0
requests-oauthlib==1.2.0
beautifulsoup4==4.8.0
certifi==2019.6.16
chardet==3.0.4
decorator==4.4.0
idna==2.8
lxml==4.3.4
Mastodon.py==1.4.6
oauthlib==3.0.2
PySocks==1.7.0
python-dateutil==2.8.0
python-docx==0.8.10
python-magic==0.4.15
pytz==2019.2
six==1.12.0
soupsieve==1.9.2
tweepy==3.8.0
urllib3==1.25.3

music-bot/README.md Normal file

@ -0,0 +1,21 @@
# music-bot
Another useless bot for posting garbage to your timeline.
### Initial setup
* Have these installed:
    * curl
    * jq
    * exiftool
* Create a ./music/ folder
* In the ./music/ folder, create a separate folder for each release (albums/singles/etc.), as in the example layout below
* In each ./music/*/ folder:
    * put the music files
    * put a corresponding cover image named 'cover.jpg' to attach together with a music file
    * put a simple file named 'link' containing a URL to the corresponding information about the release
* Edit post.sh:
    * change the instance URL from your.site to whatever you have
    * optionally edit the status message in the last POST curl
* Be careful: by default it is assumed all music files have the .opus extension. If that is not your case, please handle the situation however you need.
* Now put it in your crontab!
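An example of the expected ./music/ layout (the release and track names here are placeholders):
```
music/
├── some-album/
│   ├── 01-track.opus
│   ├── 02-track.opus
│   ├── cover.jpg
│   └── link
└── some-single/
    ├── track.opus
    ├── cover.jpg
    └── link
```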

music-bot/post.sh Normal file

@ -0,0 +1,69 @@
#!/bin/bash
if [ -z "$(which jq)" ]
then
    echo "Missing jq package, please install"
    exit 1
fi
token_dat="./token.dat"
if [ ! -f "$token_dat" ]
then
    echo "Missing ./token.dat"
    exit 1
fi
sources_dir="./music"
if [ ! -d "$sources_dir" ]
then
    echo "Missing ./music/"
    exit 1
fi
# When everything has been posted, recycle the whole archive
if [ 0 -eq "$(ls ./music | wc -l)" ]
then
    mv posted_already/* music/
fi
api_base_url="https://your.site"
access_token="$(cat ${token_dat})"
# Pick a random release folder, then a random track from it
folder_name=$(ls music/ | shuf -n 1)
track_name=$(ls music/"${folder_name}" | grep opus | shuf -n 1)
echo "From: $folder_name"
echo "Pick: $track_name"
echo " "
cp -f music/"$folder_name"/"$track_name" temp.opus
cp -f music/"$folder_name"/cover.jpg ./cover.jpg
link=$(cat music/"$folder_name"/link)
# Read title and artist from the track's metadata
full_title=$(exiftool -s -s -s -Title temp.opus)
full_artist=$(exiftool -s -s -s -Artist temp.opus)
# Upload the cover and the track, then post a status with both attached
media_cover_json=$(curl -X POST "${api_base_url}/api/v1/media" \
    -H "Authorization: Bearer ${access_token}" \
    -F "file=@cover.jpg")
media_music_json=$(curl -X POST "${api_base_url}/api/v1/media" \
    -H "Authorization: Bearer ${access_token}" \
    -F "file=@temp.opus")
media_cover_id=$(jq -r ".id" <<< "${media_cover_json}")
media_music_id=$(jq -r ".id" <<< "${media_music_json}")
echo "$media_cover_json"
echo "$media_music_json"
curl -X POST -d '{"status":"'"${full_title}"' by '"${full_artist}"' :mikulove: '"${link}"'", "visibility":"unlisted", "media_ids":["'"${media_cover_id}"'","'"${media_music_id}"'"]}' \
    -H "Authorization: Bearer ${access_token}" \
    -H "Content-Type: application/json" \
    "${api_base_url}/api/v1/statuses"
rm -f temp.opus
rm -f cover.*
# Move the posted release away so it is not picked again until recycling
mkdir -p posted_already/
mv music/"$folder_name" posted_already/

udonge-bot/.gitignore vendored Normal file

@ -0,0 +1,6 @@
/token.dat
/venv
/urls
/sources
/arts
*.log

udonge-bot/README.md Normal file

@ -0,0 +1,11 @@
# udonge-bot
Another useless bot for posting art to your fediverse timeline.
### Initial setup
* Install the [jq](https://stedolan.github.io/jq/) package for parsing curl's JSON responses.
* Create sfw/ and nsfw/ folders and fill them with art.
* Fill token.dat with the bot's auth token.
* Optionally modify it however you need!
* Now put it in your crontab!

udonge-bot/post-local.sh Normal file

@ -0,0 +1,37 @@
#!/bin/bash
if [ -z "$(which jq)" ]
then
    echo "Missing jq package, please install"
    exit 1
fi
token_dat="./token.dat"
if [ ! -f "$token_dat" ]
then
    echo "Missing ./token.dat"
    exit 1
fi
api_base_url="https://your.site"
access_token="$(cat ${token_dat})"
# 'grep sfw' matches both "sfw" and "nsfw", so this randomly picks one of the two folders
sensitivity="$(ls | grep sfw | shuf -n 1)"
source_url="${sensitivity}/$(ls ${sensitivity} | shuf -n 1)"
is_sensitive=true
if [ "${sensitivity}" == "sfw" ]
then
    is_sensitive=false
fi
media_json=$(curl -X POST "${api_base_url}/api/v1/media" \
    -H "Authorization: Bearer ${access_token}" \
    -F "file=@${source_url}")
media_id=$(jq -r ".id" <<< "${media_json}")
curl -X POST -d '{"status":" My test status..", "sensitive":'${is_sensitive}', "visibility":"unlisted", "media_ids":["'"${media_id}"'"]}' \
    -H "Authorization: Bearer ${access_token}" \
    -H "Content-Type: application/json" \
    "${api_base_url}/api/v1/statuses"

vndb-bot/.gitignore vendored Normal file

@ -0,0 +1 @@
token.dat

vndb-bot/post.py Executable file

@ -0,0 +1,98 @@
#!/home/naiji/mastodon/vndb-bot/venv/bin/python
import sys
import requests
import vndb as v  # author HarHar (https://github.com/HarHar)
from bs4 import BeautifulSoup
from mastodon import Mastodon

URL_HEAD = 'https://vndb.org/v/rand'
FORBIDDEN_TAGS = [2023, 156, 162, 897, 391, 98, 2047, 1341, 83]

def main():
    # Logging into VNDB
    vndb = v.VNDB('VNDBbot', '0.1', 'LOGIN', 'PASSWORD')
    id = -1
    safe = True
    while True:  # Searching for a good vn
        # Taking a random visual novel
        resp = requests.get(URL_HEAD)
        soup = BeautifulSoup(resp.text, 'lxml')
        # Extracting its ID
        id = int(soup.find('base')['href'].split('v')[2])
        # Getting the tags of the VN with that random ID
        vndb_result = vndb.get('vn', 'tags', '(id=' + str(id) + ')', '')
        vn_tags = vndb_result['items'][0]['tags']
        good_vn = True  # supposing
        for tag in vn_tags:
            for forbidden_tag in FORBIDDEN_TAGS:
                if int(tag[0]) == forbidden_tag:
                    good_vn = False  # it contains a bad tag
        if not good_vn:
            continue
        # Getting the stats of the VN
        vndb_result = vndb.get('vn', 'stats', '(id=' + str(id) + ')', '')
        vn_stats = vndb_result['items'][0]
        popularity = vn_stats['popularity'] if vn_stats['popularity'] else -1
        rating = vn_stats['rating'] if vn_stats['rating'] else -1
        votecount = vn_stats['votecount'] if vn_stats['votecount'] else -1
        if votecount < 10 or rating < 5 or popularity < 0.8:
            continue
        # Getting the details of the VN
        vndb_result = vndb.get('vn', 'details', '(id=' + str(id) + ')', '')
        vn_details = vndb_result['items'][0]
        # Even slightly suggestive or slightly violent covers go to Sensitive
        if (vn_details['image_flagging']['sexual_avg'] > 1) or (vn_details['image_flagging']['violence_avg'] > 1):
            safe = False
        # Getting the basic information of the VN
        vndb_result = vndb.get('vn', 'basic', '(id=' + str(id) + ')', '')
        vn_basic = vndb_result['items'][0]
        title = vn_basic['title'] if vn_basic['title'] else ''
        description = vn_details['description'] if vn_details['description'] else ''
        released = vn_basic['released'] if vn_basic['released'] else 'unknown'
        link = 'https://vndb.org/v' + str(id)
        languages = ''
        for language in vn_basic['languages']:
            languages += language + ' '
        # Logging in and posting
        mastodon = Mastodon(
            access_token = 'token.dat',
            api_base_url = 'https://your.site/',
            feature_set = 'pleroma'
        )
        text = title + '\n- - - - - - - -\n\n' + description + '\n\nReleased: ' + released + '\nPopularity: ' + (str(popularity) if popularity > -1 else 'unknown') + '\nRating: ' + str(rating) + '\nLanguages: ' + languages + '\n\n' + link
        # Getting the screenshots of the VN
        vndb_result = vndb.get('vn', 'screens', '(id=' + str(id) + ')', '')
        vn_screens = vndb_result['items'][0]['screens']
        screens = []
        counter = 0
        screens.append(mastodon.media_post(requests.get(vn_details['image']).content, 'image/jpeg'))
        # Attach up to three screenshots that are flagged as completely safe
        for screen in vn_screens:
            if screen['flagging']['sexual_avg'] == 0 and screen['flagging']['violence_avg'] == 0:
                screens.append(mastodon.media_post(requests.get(screen['image']).content, 'image/jpeg'))
                counter += 1
                if counter == 3:
                    break
        mastodon.status_post(text, media_ids=screens, visibility='unlisted', sensitive=not safe, content_type='text/bbcode')
        break

if __name__ == '__main__':
    sys.exit(main())

vndb-bot/vndb.py Normal file

@ -0,0 +1,118 @@
#!/usr/bin/env python
"""
@author: HarHar (https://github.com/HarHar)

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import socket
import json
import time

class vndbException(Exception):
    pass

class VNDB(object):
    """ Python interface for vndb's api (vndb.org), featuring cache """
    protocol = 1

    def __init__(self, clientname, clientver, username=None, password=None, debug=False):
        self.sock = socket.socket()
        if debug: print('Connecting to api.vndb.org')
        self.sock.connect(('api.vndb.org', 19534))
        if debug: print('Connected')
        if debug: print('Authenticating')
        if (username is None) or (password is None):
            self.sendCommand('login', {'protocol': self.protocol, 'client': clientname,
                'clientver': float(clientver)})
        else:
            self.sendCommand('login', {'protocol': self.protocol, 'client': clientname,
                'clientver': float(clientver), 'username': username, 'password': password})
        res = self.getRawResponse()
        if res.find('error ') == 0:
            raise vndbException(json.loads(' '.join(res.split(' ')[1:]))['msg'])
        if debug: print('Authenticated')
        self.cache = {'get': []}
        self.cachetime = 720  # cache stuff for 12 minutes

    def close(self):
        self.sock.close()

    def get(self, type, flags, filters, options):
        """ Gets a VN/producer

        Example:
        >>> results = vndb.get('vn', 'basic', '(title="Clannad")', '')
        >>> results['items'][0]['image']
        u'http://s.vndb.org/cv/99/4599.jpg'
        """
        args = '{0} {1} {2} {3}'.format(type, flags, filters, options)
        for item in self.cache['get']:
            if (item['query'] == args) and (time.time() < (item['time'] + self.cachetime)):
                return item['results']
        self.sendCommand('get', args)
        res = self.getResponse()[1]
        self.cache['get'].append({'time': time.time(), 'query': args, 'results': res})
        return res

    def sendCommand(self, command, args=None):
        """ Sends a command

        Example
        >>> self.sendCommand('test', {'this is an': 'argument'})
        """
        whole = ''
        whole += command.lower()
        if isinstance(args, str):
            whole += ' ' + args
        elif isinstance(args, dict):
            whole += ' ' + json.dumps(args)
        # Commands are terminated with an End Of Transmission character (0x04)
        output = '{0}\x04'.format(whole)
        self.sock.send(output.encode('utf-8'))

    def getResponse(self):
        """ Returns a tuple of the response to a command that was previously sent

        Example
        >>> self.sendCommand('test')
        >>> self.getResponse()
        ('ok', {'test': 0})
        """
        res = self.getRawResponse()
        cmdname = res.split(' ')[0]
        args = None  # a response may carry no JSON payload at all
        if len(res.split(' ')) > 1:
            args = json.loads(' '.join(res.split(' ')[1:]))
        if cmdname == 'error':
            if args['id'] == 'throttled':
                raise vndbException('Throttled, limit of 100 commands per 10 minutes')
            else:
                raise vndbException(args['msg'])
        return (cmdname, args)

    def getRawResponse(self):
        """ Returns a raw response to a command that was previously sent

        Example:
        >>> self.sendCommand('test')
        >>> self.getRawResponse()
        'ok {"test": 0}'
        """
        finished = False
        whole = ''
        # Keep reading until the EOT terminator (0x04) arrives
        while not finished:
            whole += self.sock.recv(4096).decode('utf-8')
            if '\x04' in whole: finished = True
        return whole.replace('\x04', '').strip()
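A minimal usage sketch of this class, reusing the query from the get() docstring (the client name is a placeholder; username and password may be omitted for an anonymous session):
```python
import vndb

client = vndb.VNDB('my-client', '0.1')  # anonymous login
results = client.get('vn', 'basic', '(title="Clannad")', '')
print(results['items'][0]['title'])
client.close()
```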

vocaloiddb-bot/.gitignore vendored Normal file

@ -0,0 +1,2 @@
token.dat
venv

vocaloiddb-bot/last_page.dat Normal file

@ -0,0 +1 @@
392

vocaloiddb-bot/post.py Executable file

@ -0,0 +1,281 @@
#!/home/naiji/mastodon/vocaloiddb-bot/venv/bin/python
import sys
import random
import requests
import os.path as op
from bs4 import BeautifulSoup
from mastodon import Mastodon

MIKUDB_HEAD = 'http://mikudb.moe/page/'
MIKUDB_TAIL = '/?s'
VOCADB_HEAD = 'https://vocadb.net/'

def findRandomAlbumUrl(last_page_id) -> str:
    # Try up to five random search pages before giving up
    alarm_counter = 0
    while alarm_counter < 5:
        selection_page_id = random.randint(0, int(last_page_id))
        resp = requests.get(MIKUDB_HEAD + str(selection_page_id) + MIKUDB_TAIL)
        soup = BeautifulSoup(resp.text, 'lxml')
        album_entries = soup.findAll('div', {'class': 'searchres album-box grid_19'})
        if len(album_entries) != 0:
            album_entry = random.choice(album_entries)
            return str(album_entry.findAll('a', href=True)[0]["href"])
        else:
            alarm_counter += 1
    return ""

def findAlbumImageUrl(soup) -> str:
    image_soup = soup.findAll('a', {'rel': 'lightbox'})
    if len(image_soup) == 0:
        return ""
    else:
        return str(image_soup[0]["href"])

def findAlbumTitle(soup) -> str:
    title_soup = soup.findAll('h1', {'class': 'album-title'}, text=True)
    if len(title_soup) == 0:
        return "UNKNOWN TITLE!! somehow the parser failed... idk, please ping @NaiJi on this post"
    else:
        return str(title_soup[0].get_text())

def main():
    with open('last_page.dat', 'r', encoding='utf-8') as file:
        last_page_id = file.readlines()[0]
    album_url = findRandomAlbumUrl(last_page_id)
    print(album_url)
    if album_url == "":
        return
    # PARSING ACTUAL ALBUM PAGE
    resp = requests.get(album_url)
    soup = BeautifulSoup(resp.text, 'lxml')
    image_url = findAlbumImageUrl(soup)
    album_title = findAlbumTitle(soup)
    # PARSING ALBUM INFO BOX
    info_raw = str(soup.find('div', {'class': 'album-box album-infopost panel panel-default'}))
    info_splits = info_raw.split('\n')
    alternative_name = ''
    type_names = []
    release_year = ''
    vocal_names = []
    producers_names = []
    genres_names = []
    links = []
    if len(info_splits) != 1:
        span_token = '</span>'
        li_token = '</li>'
        tag_token = 'rel="tag">'
        a_token = '</a>'
        href_token = '<a href="'
        href_end_token = '">'
        # # # ALTERNATIVE NAME
        for split in info_splits:
            if ' names:' in split:
                begin = split.find(span_token, 0) + len(span_token)
                end = split.find(li_token, 0)
                alternative_name = split[begin : end]
                break
        # # # TYPE
        for split in info_splits:
            if 'Type:' in split:
                amount = split.count(tag_token)
                begin = 0
                end = 0
                for i in range(amount):
                    begin = split.find(tag_token, end) + len(tag_token)
                    end = split.find(a_token, begin)
                    type_names.append(split[begin : end])
                break
        # # # RELEASE YEAR
        for split in info_splits:
            if 'Release Date:' in split:
                begin = split.find(tag_token, 0) + len(tag_token)
                end = split.find(a_token, 0)
                release_year = split[begin : end]
                break
        # # # VOCALS
        for split in info_splits:
            if 'Vocals:' in split:
                amount = split.count(tag_token)
                begin = 0
                end = 0
                for i in range(amount):
                    begin = split.find(tag_token, end) + len(tag_token)
                    end = split.find(a_token, begin)
                    vocal_names.append(split[begin : end])
                break
        # # # PRODUCERS
        for split in info_splits:
            if 'Producer:' in split:
                amount = split.count(tag_token)
                begin = 0
                end = 0
                for i in range(amount):
                    begin = split.find(tag_token, end) + len(tag_token)
                    end = split.find(a_token, begin)
                    producers_names.append(split[begin : end])
                break
        # # # GENRES
        for split in info_splits:
            if 'Genre:' in split:
                amount = split.count(tag_token)
                begin = 0
                end = 0
                for i in range(amount):
                    begin = split.find(tag_token, end) + len(tag_token)
                    end = split.find(a_token, begin)
                    genres_names.append(split[begin : end])
                break
        # # # LINKS
        for split in info_splits:
            if 'Official site' in split:
                amount = split.count(href_token)
                begin = 0
                end = 0
                for i in range(amount):
                    begin = split.find(href_token, end) + len(href_token)
                    end = split.find(href_end_token, begin)
                    links.append(split[begin : end])
                break
    print(album_title)
    print('--------')
    print(alternative_name)
    print(type_names)
    print(vocal_names)
    print(producers_names)
    print(genres_names)
    print(release_year)
    print(links)
    print(image_url)
    # SEARCHING FOR YOUTUBE URL
    youtube_url = ''
    video_page_splits = str(soup).split('\n')
    for split in video_page_splits:
        if 'youtube' in split:
            begin = split.find('src="', 0) + len('src="')
            end = split.find('"', begin)
            youtube_url = split[begin : end]
    # SEARCHING FOR VOCADB URL
    vocadb_url = ""
    entry_content_soup = soup.findAll('div', {'class': 'entry-content'})
    entry_content_splits = str(entry_content_soup).split('\n')
    for split in entry_content_splits:
        if 'vocadb.net' in split:
            begin = split.find('a href="', 0) + len('a href="')
            end = split.find('">Vo', 0)
            vocadb_url = split[begin : end]
    # PARSING VOCADB PAGE
    external_links = []
    vocadb_url = vocadb_url.replace('amp;', '')
    if len(vocadb_url) > 0:
        resp = requests.get(vocadb_url)
        soup = BeautifulSoup(resp.text, 'lxml')
        if len(soup.findAll('img', {'class': 'coverPic'})) > 0:
            vocadb_splits = str(soup).split('\n')
            for split in vocadb_splits:
                if 'www.nicovideo.jp/watch' in split and len(youtube_url) == 0:
                    begin = split.find('href="', 0) + len('href="')
                    end = split.find('">', begin)
                    youtube_url = split[begin : end]
                if 'class="extLink"' in split and 'amazon' not in split:
                    begin = split.find('href="', 0) + len('href="')
                    end = split.find('" onclick', begin)
                    external_links.append(split[begin : end])
    print(external_links)
    print(youtube_url)
    # BUILDING THE STATUS TEXT
    text = "ALBUM:\n" + album_title
    if len(alternative_name) > 0:
        text += str('\n\nALTERNATIVE TITLES:\n' + alternative_name)
    if len(type_names) > 0:
        text += '\n\nTYPE:\n'
        for type_name in type_names:
            text += (type_name + '; ')
    if len(vocal_names) > 0:
        text += '\n\nVOCAL:\n'
        for vocal_name in vocal_names:
            text += (vocal_name + '; ')
    if len(producers_names) > 0:
        text += '\n\nPRODUCING:\n'
        for producer_name in producers_names:
            text += (producer_name + '; ')
    if len(genres_names) > 0:
        text += '\n\nGENRE:\n'
        for genre_name in genres_names:
            text += (genre_name + '; ')
    if len(release_year) > 0:
        text += str('\n\nRELEASED:\n' + release_year)
    if len(youtube_url) > 0:
        text += str('\n\nVIDEO: \n' + youtube_url)
    text += str('\n\nMIKUDB: \n' + album_url)
    if len(external_links) == 0:
        external_links = links
    if len(external_links) > 0:
        text += '\n\nLINKS: \n'
        for external_link in external_links:
            text += (external_link + '\n\n')
    # LOGGING IN AND POSTING
    mastodon = Mastodon(
        access_token = 'token.dat',
        api_base_url = 'https://your.site/'
    )
    fformat = op.splitext(image_url)[1][1:]
    if (fformat == 'jpg'):
        fformat = 'jpeg'
    image_media = mastodon.media_post(requests.get(image_url).content, f'image/{fformat}')
    mastodon.status_post(text, media_ids=[image_media], visibility='unlisted', sensitive=False)

if __name__ == '__main__':
    sys.exit(main())

yuiyui-bot/.gitignore vendored Normal file

@ -0,0 +1 @@
token.dat

yuiyui-bot/post.py Normal file

@ -0,0 +1,39 @@
import sys
import requests
from bs4 import BeautifulSoup
from mastodon import Mastodon

URL_HEAD = 'https://anime-pictures.net'

def main():
    # Searching for the url of the highest rated art
    resp = requests.get(URL_HEAD)
    soup = BeautifulSoup(resp.text, 'lxml')
    final_url = URL_HEAD + str(soup.findAll('span', {'class': 'img_block2'})[12]).split('"')[5]
    print(final_url)
    # Requesting its page and getting the actual full image
    resp = requests.get(final_url)
    soup = BeautifulSoup(resp.text, 'lxml')
    src_url = URL_HEAD + str(soup.find('div', {'id': 'big_preview_cont'})).split('"')[5]
    src_ext = src_url.split('.')[-1]
    if src_ext == 'jpg':
        src_ext = 'jpeg'
    # Logging in and posting
    mastodon = Mastodon(
        access_token = 'token.dat',
        api_base_url = 'https://your.site/'
    )
    media = mastodon.media_post(requests.get(src_url).content, 'image/' + src_ext)
    toot = ':senko:\nurl: ' + final_url
    mastodon.status_post(toot, media_ids=[media], visibility='unlisted', sensitive=False)

if __name__ == '__main__':
    sys.exit(main())