2024-07-25 10:48:57 +00:00
|
|
|
#!/bin/env python3
|
2024-07-24 06:34:06 +00:00
|
|
|
import requests
|
|
|
|
import concurrent.futures
|
2024-07-24 09:52:12 +00:00
|
|
|
import argparse
|
|
|
|
from pathlib import Path
|
2024-07-24 06:34:06 +00:00
|
|
|
|
|
|
|
# Accumulator for the filtered tracks collected from every instance.
tracks_stor = []

# Load the funkwhale instance hostnames, one per line, from the local
# 'instances' file.  (Renamed the file handle: the original bound the
# handle to the name `instances` and then rebound it to the list, which
# shadowed the open file object inside its own `with` block.)
with open('instances') as instances_file:
    instances = instances_file.read().strip().split('\n')
|
|
|
|
|
|
|
|
|
|
|
|
# Command-line interface: build a playlist from a free-text query, a tag,
# or just random tracks, optionally restricted to a single instance.
parser = argparse.ArgumentParser(
    prog='funkwlplay',
    description='Create playlist from query or just random playlist tracks from funkwhale instances')

# (short flag, long flag, extra keyword arguments) for every CLI option.
_cli_specs = (
    ('-s', '--search',
     dict(help='This global search on funkwhale instances, it matches artists, albums, tracks, etc...')),
    ('-t', '--tag',
     dict(help='This tag search, use this as genre search')),
    ('-i', '--instance',
     dict(help='Specify instance, by default search on all instances in instances file')),
    ('-r', '--recursion',
     dict(type=int, default=0, help='Use recursion if instance contain more than 50 tracks')),
    ('-d', '--depth',
     dict(type=int, default=5, help='Depth of recursion, default is 5 pages, 250 tracks')),
)
for _short, _long, _extra in _cli_specs:
    parser.add_argument(_short, _long, **_extra)

args = parser.parse_args()

# An explicit -i/--instance overrides the instances file entirely.
if args.instance:
    instances = [args.instance]
|
2024-07-24 09:52:12 +00:00
|
|
|
|
|
|
|
|
|
|
|
def create_playlist_file(track_list):
    """Write *track_list* entries to an M3U8 playlist file.

    Each element of track_list is expected to be a ready-made
    '#EXTINF:...\\n<url>' entry; entries are written verbatim after the
    '#EXTM3U' header, each preceded by a blank line.
    """
    filename = 'playlist.m3u8'
    with open(filename, 'w') as file:
        file.write('#EXTM3U\n')
        for entry in track_list:
            file.write('\n' + entry)
    # Bug fix: the message previously printed the literal text
    # "(unknown)" instead of interpolating the actual file name.
    print(f'Playlist saved as {filename}')
|
2024-07-24 09:52:12 +00:00
|
|
|
|
|
|
|
|
|
|
|
def filter_tracks(tracks):
    """Drop unreachable tracks and tracks matching the local block lists.

    Reachability is probed with one HEAD request per track (up to 50 in
    parallel).  Block lists are read from the plain-text files
    'filter_tags', 'filter_artists' and 'filter_raw_urls' (one lowercase
    entry per line); the files are created empty if missing.

    Returns the list of surviving track dicts.
    """

    def is_track_reachable(track):
        # Short-timeout HEAD probe; any HTTP or connection problem marks
        # the track unreachable.  Bug fix: the original bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit.
        try:
            response = requests.head(track['listen_url'], timeout=1)
            response.raise_for_status()
            return 1
        except requests.RequestException:
            return 0

    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
        probes = [executor.submit(is_track_reachable, track) for track in tracks]
        concurrent.futures.wait(probes)

    # Keep only the tracks whose probe succeeded.
    tracks = [track for idx, track in enumerate(tracks) if probes[idx].result() == 1]

    # Make sure the block-list files exist so the open() calls below work.
    Path('filter_tags').touch()
    Path('filter_artists').touch()
    Path('filter_raw_urls').touch()

    with open('filter_tags') as tags_file:
        block_tags = tags_file.read().strip().split('\n')

    with open('filter_artists') as artists_file:
        block_artists = artists_file.read().strip().split('\n')

    with open('filter_raw_urls') as raw_urls_file:
        block_raw_urls = raw_urls_file.read().strip().split('\n')

    filtred_tracks = []
    for track in tracks:
        # Bug fix: the original tested whether the whole lowercased tag
        # *list* was an element of block_tags (a list of strings), which
        # could never match.  Block the track if any single tag matches.
        if any(tag.lower() in block_tags for tag in track['tags']):
            continue
        if track['artist']['name'].lower() in block_artists:
            continue
        if track['listen_url'].lower() in block_raw_urls:
            continue
        filtred_tracks.append(track)
    return filtred_tracks
|
2024-07-24 06:34:06 +00:00
|
|
|
|
|
|
|
|
2024-07-24 14:36:00 +00:00
|
|
|
def search_tracks_on_instance(instance, tag='', query='', recursion=args.recursion):
    """Query one funkwhale instance's /api/v1/tracks endpoint.

    instance  -- hostname of the funkwhale instance
    tag       -- optional tag (genre) filter
    query     -- optional free-text search query
    recursion -- when 1, follow the paginated 'next' links for up to
                 args.depth extra pages (50 tracks per page)

    Returns the decoded JSON response with every result's relative
    'listen_url' rewritten to an absolute https URL on the instance.
    Raises requests.HTTPError on a bad HTTP status for the first page.
    """
    r = requests.get(f'https://{instance}/api/v1/tracks',
                     params={'tag': tag, 'q': query,
                             'local': True, 'playable': True,
                             'ordering': 'random'},
                     timeout=10)
    r.raise_for_status()
    tracks = r.json()

    count = tracks['count']
    print(f'found {count} tracks on {instance}')

    if recursion == 1:
        recursion_limit = 0
        while tracks['next']:
            if recursion_limit >= args.depth:
                break
            try:
                # Bug fix: the follow-up request now carries a timeout and
                # any failure breaks out of the loop -- previously an
                # exception left tracks['next'] unchanged and the loop
                # retried the same URL forever.
                new_tracks = requests.get(tracks['next'], timeout=10).json()
            except Exception as E:
                print(E)
                break
            tracks['results'] += new_tracks['results']
            tracks['next'] = new_tracks['next']
            recursion_limit += 1

    # Rewrite relative listen URLs to absolute ones on this instance
    # (mutating the dicts in place is equivalent to the original's
    # rebuild-and-reassign of the results list).
    for track in tracks['results']:
        track['listen_url'] = f'https://{instance}' + track['listen_url']
    return tracks
|
|
|
|
|
|
|
|
|
2024-07-24 09:52:12 +00:00
|
|
|
# Fan the search out to every instance in parallel.
with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
    res = [executor.submit(search_tracks_on_instance, instance, args.tag, args.search)
           for instance in instances]
    concurrent.futures.wait(res)

playlist_files = []

for idx, instance in enumerate(instances):
    try:
        tracks = res[idx].result()
        before_filter = len(tracks['results'])
        filtred_tracks = filter_tracks(tracks['results'])
        after_filter = before_filter - len(filtred_tracks)
        print(f'{after_filter} tracks filtred on {instance}')
        # Bug fix: accumulate tracks across instances -- the original
        # assignment (`tracks_stor = filtred_tracks`) kept only the last
        # instance's tracks.
        tracks_stor += filtred_tracks
    except Exception as E:
        # A single failing/unreachable instance must not abort the run.
        print(E)

# Build one '#EXTINF' playlist entry per surviving track.
for track in tracks_stor:
    artist, album, title, play_url, track_duration = (
        track['artist']['name'], track['album']['title'], track['title'],
        track['listen_url'], track.get('duration'))
    if not track_duration:
        # Missing/zero duration is spelled -1 in EXTINF.
        track_duration = -1
    playlist_files.append(f'#EXTINF:{track_duration},{artist} - {album} - {title}\n{play_url}')

create_playlist_file(playlist_files)
|