#!/usr/bin/env python3
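# Build the iTunesSD track and playlist database for newer-generation iPod Shuffle
# players directly from the files found on the device. Optionally generates voiceover
# audio for tracks and playlists using pico2wave, espeak or RHVoice.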

# Builtin libraries
import sys
import struct
import urllib.request, urllib.parse, urllib.error
import os
import hashlib
import subprocess
import collections
import errno
import argparse
import shutil
import re
import tempfile
import signal
import enum
import functools

# External libraries
try:
    import mutagen
except ImportError:
    mutagen = None


class PlaylistType(enum.Enum):
    ALL_SONGS = 1
    NORMAL = 2
    PODCAST = 3
    AUDIOBOOK = 4


class FileType(enum.Enum):
    MP3 = (1, {'.mp3'})
    AAC = (2, {'.m4a', '.m4b', '.m4p', '.aa'})
    WAV = (4, {'.wav'})

    def __init__(self, filetype, extensions):
        self.filetype = filetype
        self.extensions = extensions


# Collect all the supported audio extensions
audio_ext = functools.reduce(lambda j, k: j.union(k), map(lambda i: i.extensions, FileType))
# The supported playlist extensions
list_ext = {".pls", ".m3u"}
# All the supported file extensions
all_ext = audio_ext.union(list_ext)
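# With the FileType members above these resolve to:
#   audio_ext == {'.mp3', '.m4a', '.m4b', '.m4p', '.aa', '.wav'}
#   all_ext   == audio_ext | {'.pls', '.m3u'}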


def make_dir_if_absent(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise


def raises_unicode_error(str):
    try:
        str.encode('latin-1')
        return False
    except (UnicodeEncodeError, UnicodeDecodeError):
        return True


def hash_error_unicode(item):
    item_bytes = item.encode('utf-8')
    return "".join(["{0:02X}".format(ord(x)) for x in reversed(hashlib.md5(item_bytes).hexdigest()[:8])])


def validate_unicode(path):
    path_list = path.split('/')
    last_raise = False
    for i in range(len(path_list)):
        if raises_unicode_error(path_list[i]):
            path_list[i] = hash_error_unicode(path_list[i])
            last_raise = True
        else:
            last_raise = False
    extension = os.path.splitext(path)[1].lower()
    return "/".join(path_list) + (extension if last_raise and extension in audio_ext else '')


def exec_exists_in_path(command):
    with open(os.devnull, 'w') as FNULL:
        try:
            with open(os.devnull, 'r') as RFNULL:
                subprocess.call([command], stdout=FNULL, stderr=subprocess.STDOUT, stdin=RFNULL)
                return True
        except OSError:
            return False


def splitpath(path):
    return path.split(os.sep)


def get_relpath(path, basepath):
    commonprefix = os.sep.join(os.path.commonprefix(list(map(splitpath, [path, basepath]))))
    return os.path.relpath(path, commonprefix)


def is_path_prefix(prefix, path):
    return prefix == os.sep.join(os.path.commonprefix(list(map(splitpath, [prefix, path]))))
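# For example (assuming a POSIX os.sep), is_path_prefix("iPod_Control/Speakable",
# "iPod_Control/Speakable/Tracks") is True, while a partial component such as
# "iPod_Control/Speak" is not treated as a prefix, because the comparison works on
# whole path components rather than raw string prefixes.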


def group_tracks_by_id3_template(tracks, template):
    grouped_tracks_dict = {}
    template_vars = set(re.findall(r'{.*?}', template))
    for track in tracks:
        try:
            id3_dict = mutagen.File(track, easy=True)
        except:
            id3_dict = {}

        key = template
        single_var_present = False
        for var in template_vars:
            val = id3_dict.get(var[1:-1], [''])[0]
            if len(val) > 0:
                single_var_present = True
                key = key.replace(var, val)

        if single_var_present:
            if key not in grouped_tracks_dict:
                grouped_tracks_dict[key] = []
            grouped_tracks_dict[key].append(track)

    return sorted(grouped_tracks_dict.items())
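# For example, with the template '{artist} - {album}' a track tagged artist "Foo" and
# album "Bar" is grouped under the playlist key "Foo - Bar"; tracks where none of the
# template variables resolve to a tag value are left out of the generated playlists.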


class Text2Speech(object):
    valid_tts = {'pico2wave': True, 'RHVoice': True, 'espeak': True}

    @staticmethod
    def check_support():
        voiceoverAvailable = False

        # Check for pico2wave voiceover
        if not exec_exists_in_path("pico2wave"):
            Text2Speech.valid_tts['pico2wave'] = False
            print("Warning: pico2wave not found, voiceover won't be generated using it.")
        else:
            voiceoverAvailable = True

        # Check for espeak voiceover
        if not exec_exists_in_path("espeak"):
            Text2Speech.valid_tts['espeak'] = False
            print("Warning: espeak not found, voiceover won't be generated using it.")
        else:
            voiceoverAvailable = True

        # Check for Russian RHVoice voiceover
        if not exec_exists_in_path("RHVoice"):
            Text2Speech.valid_tts['RHVoice'] = False
            print("Warning: RHVoice not found, Russian voiceover won't be generated.")
        else:
            voiceoverAvailable = True

        # Return True only if we found at least one voiceover program.
        # Otherwise this would result in silent voiceover for tracks and "Playlist N" for playlists.
        return voiceoverAvailable

    @staticmethod
    def text2speech(out_wav_path, text):
        # Skip voiceover generation if a track with the same name is used.
        # This might happen with "Track001" or "01. Intro" names for example.
        if os.path.isfile(out_wav_path):
            verboseprint("Using existing", out_wav_path)
            return True

        # Ensure we deal with unicode later
        if not isinstance(text, str):
            text = str(text, 'utf-8')
        lang = Text2Speech.guess_lang(text)
        if lang == "ru-RU":
            return Text2Speech.rhvoice(out_wav_path, text)
        else:
            if Text2Speech.pico2wave(out_wav_path, text):
                return True
            elif Text2Speech.espeak(out_wav_path, text):
                return True
            else:
                return False

    # guess-language seems like an overkill for now
    @staticmethod
    def guess_lang(unicodetext):
        lang = 'en-GB'
        if re.search("[А-Яа-я]", unicodetext) is not None:
            lang = 'ru-RU'
        return lang

    @staticmethod
    def pico2wave(out_wav_path, unicodetext):
        if not Text2Speech.valid_tts['pico2wave']:
            return False
        subprocess.call(["pico2wave", "-l", "en-GB", "-w", out_wav_path, unicodetext])
        return True

    @staticmethod
    def espeak(out_wav_path, unicodetext):
        if not Text2Speech.valid_tts['espeak']:
            return False
        subprocess.call(["espeak", "-v", "english_rp", "-s", "150", "-w", out_wav_path, unicodetext])
        return True

    @staticmethod
    def rhvoice(out_wav_path, unicodetext):
        if not Text2Speech.valid_tts['RHVoice']:
            return False

        tmp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        tmp_file.close()

        proc = subprocess.Popen(["RHVoice", "--voice=Elena", "--variant=Russian", "--volume=100", "-o", tmp_file.name], stdin=subprocess.PIPE)
        proc.communicate(input=unicodetext.encode('utf-8'))
        # make a little bit louder to be comparable with pico2wave
        subprocess.call(["sox", tmp_file.name, out_wav_path, "norm"])

        os.remove(tmp_file.name)
        return True
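

# Each Record subclass describes one chunk of the iTunesSD file: self._struct maps a
# field name to its struct format character and default value, __setitem__ overrides a
# default, and construct() packs the fields little-endian in declaration order.
# text_to_speech() derives the voiceover .wav filename from the reversed hex form of a
# record's dbid and places it under iPod_Control/Speakable/{Tracks,Playlists}.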
class Record(object):

    def __init__(self, parent):
        self.parent = parent
        self._struct = collections.OrderedDict([])
        self._fields = {}
        self.track_voiceover = parent.track_voiceover
        self.playlist_voiceover = parent.playlist_voiceover
        self.rename = parent.rename
        self.trackgain = parent.trackgain

    def __getitem__(self, item):
        if item not in list(self._struct.keys()):
            raise KeyError
        return self._fields.get(item, self._struct[item][1])

    def __setitem__(self, item, value):
        self._fields[item] = value

    def construct(self):
        output = bytes()
        for i in list(self._struct.keys()):
            (fmt, default) = self._struct[i]
            output += struct.pack("<" + fmt, self._fields.get(i, default))
        return output

    def text_to_speech(self, text, dbid, playlist=False):
        if (self.track_voiceover and not playlist) or (self.playlist_voiceover and playlist):
            # Create the voiceover wav file
            fn = ''.join(format(x, '02x') for x in reversed(dbid))
            path = os.path.join(self.base, "iPod_Control", "Speakable", "Tracks" if not playlist else "Playlists", fn + ".wav")
            return Text2Speech.text2speech(path, text)
        return False

    def path_to_ipod(self, filename):
        if os.path.commonprefix([os.path.abspath(filename), self.base]) != self.base:
            raise IOError("Cannot get iPod filename, since the file is outside the iPod path")
        baselen = len(self.base)
        if self.base.endswith(os.path.sep):
            baselen -= 1
        ipodname = "/".join(os.path.abspath(filename)[baselen:].split(os.path.sep))
        return ipodname

    def ipod_to_path(self, ipodname):
        return os.path.abspath(os.path.join(self.base, os.path.sep.join(ipodname.split("/"))))

    @property
    def shuffledb(self):
        parent = self.parent
        while parent.__class__ != Shuffler:
            parent = parent.parent
        return parent

    @property
    def base(self):
        return self.shuffledb.path

    @property
    def tracks(self):
        return self.shuffledb.tracks

    @property
    def albums(self):
        return self.shuffledb.albums

    @property
    def artists(self):
        return self.shuffledb.artists

    @property
    def lists(self):
        return self.shuffledb.lists
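

# Overall iTunesSD layout as built below: a fixed 64-byte "bdhs" header (TunesSD),
# followed by the track header ("hths") with one 4-byte offset per track record
# ("rths"), followed by the playlist header ("hphs") with one 4-byte offset per
# playlist record ("lphs"). The header IDs are stored byte-reversed, which is why the
# inline comments also give the more familiar shdb/shth/shtr/shph/shpl names.
# See http://shuffle3db.wikispaces.com/iTunesSD3gen for the format description.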
class TunesSD(Record):
    def __init__(self, parent):
        Record.__init__(self, parent)
        self.track_header = TrackHeader(self)
        self.play_header = PlaylistHeader(self)
        self._struct = collections.OrderedDict([
            ("header_id", ("4s", b"bdhs")),  # shdb
            ("unknown1", ("I", 0x02000003)),
            ("total_length", ("I", 64)),
            ("total_number_of_tracks", ("I", 0)),
            ("total_number_of_playlists", ("I", 0)),
            ("unknown2", ("Q", 0)),
            ("max_volume", ("B", 0)),
            ("voiceover_enabled", ("B", int(self.track_voiceover))),
            ("unknown3", ("H", 0)),
            ("total_tracks_without_podcasts", ("I", 0)),
            ("track_header_offset", ("I", 64)),
            ("playlist_header_offset", ("I", 0)),
            ("unknown4", ("20s", b"\x00" * 20)),
        ])

    def construct(self):
        # The header is a fixed length, so no need to calculate it
        self.track_header.base_offset = 64
        track_header = self.track_header.construct()

        # The playlist offset will depend on the number of tracks
        self.play_header.base_offset = self.track_header.base_offset + len(track_header)
        play_header = self.play_header.construct(self.track_header.tracks)
        self["playlist_header_offset"] = self.play_header.base_offset

        self["total_number_of_tracks"] = self.track_header["number_of_tracks"]
        self["total_tracks_without_podcasts"] = self.track_header.total_tracks_without_podcasts()
        self["total_number_of_playlists"] = self.play_header["number_of_playlists"]

        output = Record.construct(self)
        return output + track_header + play_header


class TrackHeader(Record):
    def __init__(self, parent):
        self.base_offset = 0
        self.total_podcasts = 0
        Record.__init__(self, parent)
        self._struct = collections.OrderedDict([
            ("header_id", ("4s", b"hths")),  # shth
            ("total_length", ("I", 0)),
            ("number_of_tracks", ("I", 0)),
            ("unknown1", ("Q", 0)),
        ])

    def construct(self):
        self["number_of_tracks"] = len(self.tracks)
        self["total_length"] = 20 + (len(self.tracks) * 4)
        output = Record.construct(self)
        self.total_podcasts = 0

        # Construct the underlying tracks
        track_chunk = bytes()
        for i in self.tracks:
            track = Track(self)
            verboseprint("[*] Adding track", i)
            track.populate(i)
            if track.is_podcast:
                self.total_podcasts += 1
            output += struct.pack("I", self.base_offset + self["total_length"] + len(track_chunk))
            track_chunk += track.construct()
        return output + track_chunk

    def total_tracks_without_podcasts(self):
        return self["number_of_tracks"] - self.total_podcasts


class Track(Record):

    def __init__(self, parent):
        Record.__init__(self, parent)
        self.is_podcast = False
        self._struct = collections.OrderedDict([
            ("header_id", ("4s", b"rths")),  # shtr
            ("header_length", ("I", 0x174)),
            ("start_at_pos_ms", ("I", 0)),
            ("stop_at_pos_ms", ("I", 0)),
            ("volume_gain", ("I", int(self.trackgain))),
            ("filetype", ("I", 1)),
            ("filename", ("256s", b"\x00" * 256)),
            ("bookmark", ("I", 0)),
            ("dontskip", ("B", 1)),
            ("remember", ("B", 0)),
            ("unintalbum", ("B", 0)),
            ("unknown", ("B", 0)),
            ("pregap", ("I", 0x200)),
            ("postgap", ("I", 0x200)),
            ("numsamples", ("I", 0)),
            ("unknown2", ("I", 0)),
            ("gapless", ("I", 0)),
            ("unknown3", ("I", 0)),
            ("albumid", ("I", 0)),
            ("track", ("H", 1)),
            ("disc", ("H", 0)),
            ("unknown4", ("Q", 0)),
            ("dbid", ("8s", 0)),
            ("artistid", ("I", 0)),
            ("unknown5", ("32s", b"\x00" * 32)),
        ])

    def set_podcast(self):
        self.is_podcast = True
        self["dontskip"] = 0  # podcasts should be skipped when shuffling (i.e. not marked "dontskip")
        self["remember"] = 1  # podcasts should remember their last playback position

    def populate(self, filename):
        self["filename"] = self.path_to_ipod(filename).encode('utf-8')

        # Assign the "filetype" based on the extension
        ext = os.path.splitext(filename)[1].lower()
        for type in FileType:
            if ext in type.extensions:
                self["filetype"] = type.filetype
                break

        if "/iPod_Control/Podcasts/" in filename:
            self.set_podcast()

        text = os.path.splitext(os.path.basename(filename))[0]

        # Try to get album and artist information with mutagen
        if mutagen:
            audio = None
            try:
                audio = mutagen.File(filename, easy=True)
            except:
                print("Error calling mutagen. Possibly an invalid filename or ID3 tags (hyphen in the filename?)")
            if audio:
                if "Podcast" in audio.get("genre", ["Unknown"]):
                    self.set_podcast()

                # Note: the Rhythmbox iPod plugin always sets this value to 0.
                self["stop_at_pos_ms"] = int(audio.info.length * 1000)

                artist = audio.get("artist", ["Unknown"])[0]
                if artist in self.artists:
                    self["artistid"] = self.artists.index(artist)
                else:
                    self["artistid"] = len(self.artists)
                    self.artists.append(artist)

                album = audio.get("album", ["Unknown"])[0]
                if album in self.albums:
                    self["albumid"] = self.albums.index(album)
                else:
                    self["albumid"] = len(self.albums)
                    self.albums.append(album)

                if audio.get("title", "") and audio.get("artist", ""):
                    text = " - ".join(audio.get("title", "") + audio.get("artist", ""))

        # Handle the VoiceOverData
        if isinstance(text, str):
            text = text.encode('utf-8', 'ignore')
        self["dbid"] = hashlib.md5(text).digest()[:8]
        self.text_to_speech(text, self["dbid"])


class PlaylistHeader(Record):
    def __init__(self, parent):
        self.base_offset = 0
        Record.__init__(self, parent)
        self._struct = collections.OrderedDict([
            ("header_id", ("4s", b"hphs")),  # shph
            ("total_length", ("I", 0)),
            ("number_of_playlists", ("I", 0)),
            ("number_of_non_podcast_lists", ("H", 65535)),
            ("number_of_master_lists", ("2s", b"\x01\x00")),
            ("number_of_non_audiobook_lists", ("2s", b"\xFF\xFF")),
            ("unknown2", ("2s", b"\x00" * 2)),
        ])

    def construct(self, tracks):
        # Build the master list
        masterlist = Playlist(self)
        verboseprint("[+] Adding master playlist")
        masterlist.set_master(tracks)
        chunks = [masterlist.construct(tracks)]

        # Build all the remaining playlists
        playlistcount = 1
        podcastlistcount = 0
        for i in self.lists:
            playlist = Playlist(self)
            verboseprint("[+] Adding playlist", (i[0] if type(i) == type(()) else i))
            playlist.populate(i)
            construction = playlist.construct(tracks)
            if playlist["number_of_songs"] > 0:
                if playlist["listtype"] == PlaylistType.PODCAST.value:
                    podcastlistcount += 1
                playlistcount += 1
                chunks += [construction]
            else:
                print("Error: Playlist contains no tracks. Skipping playlist.")

        self["number_of_playlists"] = playlistcount
        if podcastlistcount > 0:
            # "number_of_non_podcast_lists" should default to 65535 if there
            # aren't any podcast playlists, so only calculate the count if
            # podcastlistcount is greater than 0
            self["number_of_non_podcast_lists"] = playlistcount - podcastlistcount
        self["total_length"] = 0x14 + (self["number_of_playlists"] * 4)
        # Start the header

        output = Record.construct(self)
        offset = self.base_offset + self["total_length"]

        for i in range(len(chunks)):
            output += struct.pack("I", offset)
            offset += len(chunks[i])

        return output + b"".join(chunks)


class Playlist(Record):
    def __init__(self, parent):
        self.listtracks = []
        self.listtype = PlaylistType.NORMAL
        Record.__init__(self, parent)
        self._struct = collections.OrderedDict([
            ("header_id", ("4s", b"lphs")),  # shpl
            ("total_length", ("I", 0)),
            ("number_of_songs", ("I", 0)),
            ("number_of_nonaudio", ("I", 0)),
            ("dbid", ("8s", b"\x00" * 8)),
            ("listtype", ("I", 2)),
            ("unknown1", ("16s", b"\x00" * 16)),
        ])

    def set_master(self, tracks):
        # By default, use the builtin "All Songs" voiceover (dbid all zero).
        # Otherwise, generate an alternative "All Songs" voiceover to match the speaker voice of the other playlists.
        if self.playlist_voiceover and (Text2Speech.valid_tts['pico2wave'] or Text2Speech.valid_tts['espeak']):
            self["dbid"] = hashlib.md5(b"masterlist").digest()[:8]
            self.text_to_speech("All songs", self["dbid"], True)
        self.listtype = PlaylistType.ALL_SONGS
        self.listtracks = tracks

    def populate_m3u(self, data):
        listtracks = []
        for i in data:
            if not i.startswith("#"):
                path = i.strip()
                if self.rename:
                    path = validate_unicode(path)
                listtracks.append(path)
        return listtracks

    def populate_pls(self, data):
        sorttracks = []
        for i in data:
            dataarr = i.strip().split("=", 1)
            if dataarr[0].lower().startswith("file"):
                num = int(dataarr[0][4:])
                filename = urllib.parse.unquote(dataarr[1]).strip()
                if filename.lower().startswith('file://'):
                    filename = filename[7:]
                if self.rename:
                    filename = validate_unicode(filename)
                sorttracks.append((num, filename))
        listtracks = [x for (_, x) in sorted(sorttracks)]
        return listtracks
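
    # Illustrative .pls entry (hypothetical path) as handled by populate_pls above:
    #   File1=file:///mnt/ipod/iPod_Control/Music/Song%20One.mp3
    # is read as entry 1 with the path "/mnt/ipod/iPod_Control/Music/Song One.mp3".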

    def populate_directory(self, playlistpath, recursive=True):
        # Add all tracks inside the folder and its subfolders recursively.
        # Folders containing no music and only a single album would generate
        # duplicated playlists. That is intended and "won't fix".
        # Empty folders (inside the music path) will generate an error -> "won't fix".
        listtracks = []
        for (dirpath, dirnames, filenames) in os.walk(playlistpath):
            dirnames.sort()

            # Ignore any hidden directories
            if "/." not in dirpath:
                for filename in sorted(filenames, key=lambda x: x.lower()):
                    # Only add valid music files to the playlist
                    if os.path.splitext(filename)[1].lower() in (".mp3", ".m4a", ".m4b", ".m4p", ".aa", ".wav"):
                        fullPath = os.path.abspath(os.path.join(dirpath, filename))
                        listtracks.append(fullPath)
            if not recursive:
                break
        return listtracks

    def remove_relatives(self, relative, filename):
        base = os.path.dirname(os.path.abspath(filename))
        if not os.path.exists(relative):
            relative = os.path.join(base, relative)
        fullPath = relative
        return fullPath

    def populate(self, obj):
        # A tuple means an automatically generated playlist: (name, list of tracks)
        if type(obj) == type(()):
            self.listtracks = obj[1]
            text = obj[0]
        else:
            filename = obj
            if "/iPod_Control/Podcasts/" in filename:
                self.listtype = PlaylistType.PODCAST
            if os.path.isdir(filename):
                # Create a playlist of the folder and all subfolders
                self.listtracks = self.populate_directory(filename)
                text = os.path.splitext(os.path.basename(filename))[0]
            else:
                # Read the playlist file
                with open(filename, 'r', errors="replace") as f:
                    data = f.readlines()

                extension = os.path.splitext(filename)[1].lower()
                if extension == '.pls':
                    self.listtracks = self.populate_pls(data)
                elif extension == '.m3u':
                    self.listtracks = self.populate_m3u(data)
                else:
                    raise ValueError("Unsupported playlist extension: " + extension)

                # Ensure all paths are not relative to the playlist file
                for i in range(len(self.listtracks)):
                    self.listtracks[i] = self.remove_relatives(self.listtracks[i], filename)
                text = os.path.splitext(os.path.basename(filename))[0]

        # Handle the VoiceOverData
        self["dbid"] = hashlib.md5(text.encode('utf-8')).digest()[:8]
        self.text_to_speech(text, self["dbid"], True)

    def construct(self, tracks):
        self["total_length"] = 44 + (4 * len(self.listtracks))
        self["number_of_songs"] = 0
        self["listtype"] = self.listtype.value

        chunks = bytes()
        for i in self.listtracks:
            path = self.ipod_to_path(i)
            position = -1
            if PlaylistType.ALL_SONGS == self.listtype and "/iPod_Control/Podcasts/" in path:
                # Exclude podcasts from the "All Songs" playlist
                continue
            try:
                position = tracks.index(path)
            except:
                # Print an error if no track was found.
                # Empty playlists are handled in the PlaylistHeader class.
                print("Error: Could not find track \"" + path + "\".")
                print("Maybe it is an invalid FAT filesystem name. Please fix your playlist. Skipping track.")
            if position > -1:
                chunks += struct.pack("I", position)
                self["number_of_songs"] += 1
        self["number_of_nonaudio"] = self["number_of_songs"]

        output = Record.construct(self)
        return output + chunks


class Shuffler(object):
    def __init__(self, path, track_voiceover=False, playlist_voiceover=False, rename=False, trackgain=0, auto_dir_playlists=None, auto_id3_playlists=None):
        self.path = os.path.abspath(path)
        self.tracks = []
        self.albums = []
        self.artists = []
        self.lists = []
        self.tunessd = None
        self.track_voiceover = track_voiceover
        self.playlist_voiceover = playlist_voiceover
        self.rename = rename
        self.trackgain = trackgain
        self.auto_dir_playlists = auto_dir_playlists
        self.auto_id3_playlists = auto_id3_playlists

    def initialize(self):
        # Remove existing voiceover files (they are either useless or will be overwritten anyway)
        for dirname in ('iPod_Control/Speakable/Playlists', 'iPod_Control/Speakable/Tracks'):
            shutil.rmtree(os.path.join(self.path, dirname), ignore_errors=True)
        for dirname in ('iPod_Control/iTunes', 'iPod_Control/Music', 'iPod_Control/Podcasts', 'iPod_Control/Speakable/Playlists', 'iPod_Control/Speakable/Tracks'):
            make_dir_if_absent(os.path.join(self.path, dirname))

    def dump_state(self):
        print("Shuffle DB state")
        print("Tracks", self.tracks)
        print("Albums", self.albums)
        print("Artists", self.artists)
        print("Playlists", self.lists)

    def populate(self):
        self.tunessd = TunesSD(self)
        for (dirpath, dirnames, filenames) in os.walk(self.path):
            dirnames.sort()
            relpath = get_relpath(dirpath, self.path)
            # Ignore the speakable directory and any hidden directories
            if not is_path_prefix("iPod_Control/Speakable", relpath) and "/." not in dirpath:
                for filename in sorted(filenames, key=lambda x: x.lower()):
                    # Ignore hidden files
                    if not filename.startswith("."):
                        fullPath = os.path.abspath(os.path.join(dirpath, filename))
                        if os.path.splitext(filename)[1].lower() in (".mp3", ".m4a", ".m4b", ".m4p", ".aa", ".wav"):
                            self.tracks.append(fullPath)
                        if os.path.splitext(filename)[1].lower() in (".pls", ".m3u"):
                            self.lists.append(fullPath)

            # Create automatic playlists in the music directory.
            # Ignore the (music) root and any hidden directories.
            if self.auto_dir_playlists and ("iPod_Control/Music/" in dirpath or "iPod_Control/Podcasts/" in dirpath) and "/." not in dirpath:
                # Only go to a specific depth. -1 is unlimited, 0 is ignored as there is already a master playlist.
                depth = dirpath[len(self.path) + len(os.path.sep):].count(os.path.sep) - 1
                if self.auto_dir_playlists < 0 or depth <= self.auto_dir_playlists:
                    self.lists.append(os.path.abspath(dirpath))

        if self.auto_id3_playlists is not None:
            if mutagen:
                for grouped_list in group_tracks_by_id3_template(self.tracks, self.auto_id3_playlists):
                    self.lists.append(grouped_list)
            else:
                print("Error: No mutagen found. Cannot generate auto-id3-playlists.")
                sys.exit(1)

    def write_database(self):
        print("Writing database. This may take a while...")
        with open(os.path.join(self.path, "iPod_Control", "iTunes", "iTunesSD"), "wb") as f:
            try:
                f.write(self.tunessd.construct())
            except IOError as e:
                print("I/O error({0}): {1}".format(e.errno, e.strerror))
                print("Error: Writing iPod database failed.")
                sys.exit(1)

        print("Database written successfully:")
        print("Tracks", len(self.tracks))
        print("Albums", len(self.albums))
        print("Artists", len(self.artists))
        print("Playlists", len(self.lists))


#
# Read all files from the directory
# Construct the appropriate iTunesDB file
# Construct the appropriate iTunesSD file
# http://shuffle3db.wikispaces.com/iTunesSD3gen
# Use SVOX pico2wave and RHVoice to produce voiceover data
#
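# Example invocation (illustrative; adjust the script name and mount point for your setup):
#   ./shuffle.py --track-voiceover --playlist-voiceover --auto-dir-playlists /mnt/ipod
#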


def check_unicode(path):
    ret_flag = False  # True if there is a recognizable file within this level
    for item in os.listdir(path):
        if os.path.isfile(os.path.join(path, item)):
            if os.path.splitext(item)[1].lower() in all_ext:
                ret_flag = True
                if raises_unicode_error(item):
                    src = os.path.join(path, item)
                    dest = os.path.join(path, hash_error_unicode(item)) + os.path.splitext(item)[1].lower()
                    print('Renaming %s -> %s' % (src, dest))
                    os.rename(src, dest)
        else:
            ret_flag = (check_unicode(os.path.join(path, item)) or ret_flag)
            if ret_flag and raises_unicode_error(item):
                src = os.path.join(path, item)
                new_name = hash_error_unicode(item)
                dest = os.path.join(path, new_name)
                print('Renaming %s -> %s' % (src, dest))
                os.rename(src, dest)
    return ret_flag


def nonnegative_int(string):
    try:
        intval = int(string)
    except ValueError:
        raise argparse.ArgumentTypeError("'%s' must be an integer" % string)

    if intval < 0 or intval > 99:
        raise argparse.ArgumentTypeError("Track gain value should be in range 0-99")
    return intval


def checkPathValidity(path):
    if not os.path.isdir(path):
        print("Error finding the iPod directory. Maybe it is not connected or mounted?")
        sys.exit(1)

    if not os.access(path, os.W_OK):
        print('Unable to get write permissions in the iPod directory')
        sys.exit(1)


def handle_interrupt(signal, frame):
    print("Interrupt detected, exiting...")
    sys.exit(1)


if __name__ == '__main__':
    signal.signal(signal.SIGINT, handle_interrupt)

    parser = argparse.ArgumentParser(description=
        'Python script for building the Track and Playlist database '
        'for the newer gen iPod Shuffle. Version 1.5')

    parser.add_argument('-t', '--track-voiceover', action='store_true',
                        help='Enable track voiceover feature')

    parser.add_argument('-p', '--playlist-voiceover', action='store_true',
                        help='Enable playlist voiceover feature')

    parser.add_argument('-u', '--rename-unicode', action='store_true',
                        help='Rename files causing unicode errors, will do minimal required renaming')

    parser.add_argument('-g', '--track-gain', type=nonnegative_int, default='0',
                        help='Specify volume gain (0-99) for all tracks; '
                             '0 (default) means no gain and is usually fine; '
                             'e.g. 60 is very loud even on minimal player volume')

    parser.add_argument('-d', '--auto-dir-playlists', type=int, default=None, const=-1, nargs='?',
                        help='Generate automatic playlists for each folder recursively inside '
                             '"iPod_Control/Music/". You can optionally limit the depth: '
                             '0=root, 1=artist, 2=album, n=subfoldername, default=-1 (no limit).')

    parser.add_argument('-i', '--auto-id3-playlists', type=str, default=None, metavar='ID3_TEMPLATE', const='{artist}', nargs='?',
                        help='Generate automatic playlists based on the ID3 tags of any music '
                             'added to the iPod. You can optionally specify a template string '
                             'based on which ID3 tags are used to generate playlists. For example, '
                             '\'{artist} - {album}\' will use the pair of artist and album to group '
                             'tracks under one playlist. Similarly \'{genre}\' will group tracks based '
                             'on their genre tag. The default template is \'{artist}\'')

    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Show verbose output of database generation.')

    parser.add_argument('path', help='Path to the iPod\'s root directory')

    result = parser.parse_args()

    # Enable verbose printing if desired
    verboseprint = print if result.verbose else lambda *a, **k: None

    checkPathValidity(result.path)

    if result.rename_unicode:
        check_unicode(result.path)

    if not mutagen:
        print("Warning: No mutagen found. The database will not contain any album or artist information.")

    verboseprint("Playlist voiceover requested:", result.playlist_voiceover)
    verboseprint("Track voiceover requested:", result.track_voiceover)
    if result.track_voiceover or result.playlist_voiceover:
        if not Text2Speech.check_support():
            print("Error: Did not find any voiceover program. Voiceover disabled.")
            result.track_voiceover = False
            result.playlist_voiceover = False
        else:
            verboseprint("Voiceover available.")

    shuffle = Shuffler(result.path,
                       track_voiceover=result.track_voiceover,
                       playlist_voiceover=result.playlist_voiceover,
                       rename=result.rename_unicode,
                       trackgain=result.track_gain,
                       auto_dir_playlists=result.auto_dir_playlists,
                       auto_id3_playlists=result.auto_id3_playlists)
    shuffle.initialize()
    shuffle.populate()
    shuffle.write_database()