#!/usr/bin/env python3

# Builtin libraries
import argparse
import collections
import errno
import hashlib
import os
import re
import shutil
import signal
import struct
import subprocess
import sys
import tempfile
import urllib.error
import urllib.parse
import urllib.request

# External libraries
try:
    import mutagen
except ImportError:
    mutagen = None

audio_ext = (".mp3", ".m4a", ".m4b", ".m4p", ".aa", ".wav")
list_ext = (".pls", ".m3u")


def make_dir_if_absent(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise


def raises_unicode_error(str):
    try:
        str.encode("latin-1")
        return False
    except (UnicodeEncodeError, UnicodeDecodeError):
        return True


def hash_error_unicode(item):
    item_bytes = item.encode("utf-8")
    return "".join(
        [
            "{0:02X}".format(ord(x))
            for x in reversed(hashlib.md5(item_bytes).hexdigest()[:8])
        ]
    )


def validate_unicode(path):
    path_list = path.split("/")
    last_raise = False
    for i in range(len(path_list)):
        if raises_unicode_error(path_list[i]):
            path_list[i] = hash_error_unicode(path_list[i])
            last_raise = True
        else:
            last_raise = False
    extension = os.path.splitext(path)[1].lower()
    return "/".join(path_list) + (
        extension if last_raise and extension in audio_ext else ""
    )
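
# Illustration (hypothetical filenames): a playlist entry such as
# "Музыка/Трек.mp3", whose components cannot be encoded as latin-1, is rewritten
# by validate_unicode() to "<16 hex chars>/<16 hex chars>.mp3". Each renamed
# component comes from hash_error_unicode(), which emits two uppercase hex
# digits per character of the reversed first 8 characters of the MD5 hexdigest,
# and the original audio extension is re-appended only to the final component.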


def exec_exists_in_path(command):
    with open(os.devnull, "w") as FNULL:
        try:
            with open(os.devnull, "r") as RFNULL:
                subprocess.call(
                    [command], stdout=FNULL, stderr=subprocess.STDOUT, stdin=RFNULL
                )
                return True
        except OSError as e:
            return False


def splitpath(path):
    return path.split(os.sep)


def get_relpath(path, basepath):
    commonprefix = os.sep.join(
        os.path.commonprefix(list(map(splitpath, [path, basepath])))
    )
    return os.path.relpath(path, commonprefix)


def is_path_prefix(prefix, path):
    return prefix == os.sep.join(
        os.path.commonprefix(list(map(splitpath, [prefix, path])))
    )
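
# Quick sanity examples for the path helpers (illustrative, POSIX os.sep assumed):
#   is_path_prefix("iPod_Control/Speakable", "iPod_Control/Speakable/Tracks")  -> True
#   is_path_prefix("iPod_Control/Speakable", "iPod_Control/Music")             -> False
#   get_relpath("/mnt/ipod/iPod_Control/Music", "/mnt/ipod")                   -> "iPod_Control/Music"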


def group_tracks_by_id3_template(tracks, template):
    grouped_tracks_dict = {}
    template_vars = set(re.findall(r"{.*?}", template))
    for track in tracks:
        try:
            id3_dict = mutagen.File(track, easy=True)
        except:
            id3_dict = {}
        if id3_dict is None:
            # mutagen.File() returns None for files it cannot identify
            id3_dict = {}

        key = template
        single_var_present = False
        for var in template_vars:
            val = id3_dict.get(var[1:-1], [""])[0]
            if len(val) > 0:
                single_var_present = True
                key = key.replace(var, val)

        if single_var_present:
            if key not in grouped_tracks_dict:
                grouped_tracks_dict[key] = []
            grouped_tracks_dict[key].append(track)

    return sorted(grouped_tracks_dict.items())
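
# Example (illustrative): with template "{artist} - {album}", tracks whose easy
# ID3 tags are artist="Foo" and album="Bar" all end up under the key "Foo - Bar",
# so the returned value looks like
#   [("Foo - Bar", ["/path/a.mp3", "/path/b.mp3"]), ...]
# Tracks for which none of the template variables resolve are left out entirely.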


class Text2Speech(object):
    valid_tts = {"pico2wave": True, "RHVoice": True, "espeak": True, "say": True}

    @staticmethod
    def check_support():
        voiceoverAvailable = False

        # Check for macOS say voiceover
        if not exec_exists_in_path("say"):
            Text2Speech.valid_tts["say"] = False
            print("Warning: macOS say not found, voiceover won't be generated using it.")
        else:
            voiceoverAvailable = True

        # Check for pico2wave voiceover
        if not exec_exists_in_path("pico2wave"):
            Text2Speech.valid_tts["pico2wave"] = False
            print("Warning: pico2wave not found, voiceover won't be generated using it.")
        else:
            voiceoverAvailable = True

        # Check for espeak voiceover
        if not exec_exists_in_path("espeak"):
            Text2Speech.valid_tts["espeak"] = False
            print("Warning: espeak not found, voiceover won't be generated using it.")
        else:
            voiceoverAvailable = True

        # Check for Russian RHVoice voiceover
        if not exec_exists_in_path("RHVoice"):
            Text2Speech.valid_tts["RHVoice"] = False
            print("Warning: RHVoice not found, Russian voiceover won't be generated.")
        else:
            voiceoverAvailable = True

        # Return whether we found at least one voiceover program.
        # Otherwise this will result in silent voiceover for tracks and "Playlist N" for playlists.
        return voiceoverAvailable

    @staticmethod
    def text2speech(out_wav_path, text):
        # Skip voiceover generation if a track with the same name is used.
        # This might happen with "Track001" or "01. Intro" names for example.
        if os.path.isfile(out_wav_path):
            verboseprint("Using existing", out_wav_path)
            return True

        # ensure we deal with unicode later
        if not isinstance(text, str):
            text = str(text, "utf-8")
        lang = Text2Speech.guess_lang(text)
        if lang == "ru-RU":
            return Text2Speech.rhvoice(out_wav_path, text)
        else:
            if Text2Speech.pico2wave(out_wav_path, text):
                return True
            elif Text2Speech.espeak(out_wav_path, text):
                return True
            elif Text2Speech.say(out_wav_path, text):
                return True
            else:
                return False

    # guess-language seems like an overkill for now
    @staticmethod
    def guess_lang(unicodetext):
        lang = "en-GB"
        if re.search("[А-Яа-я]", unicodetext) is not None:
            lang = "ru-RU"
        return lang

    @staticmethod
    def pico2wave(out_wav_path, unicodetext):
        if not Text2Speech.valid_tts["pico2wave"]:
            return False
        subprocess.call(
            ["pico2wave", "-l", "en-GB", "-w", out_wav_path, " ", unicodetext]
        )
        return True

    @staticmethod
    def say(out_wav_path, unicodetext):
        if not Text2Speech.valid_tts["say"]:
            return False
        subprocess.call(
            [
                "say",
                "-o",
                out_wav_path,
                "--data-format=LEI16",
                "--file-format=WAVE",
                "--",
                unicodetext,
            ]
        )
        return True

    @staticmethod
    def espeak(out_wav_path, unicodetext):
        if not Text2Speech.valid_tts["espeak"]:
            return False
        subprocess.call(
            [
                "espeak",
                "-v",
                "english_rp",
                "-s",
                "150",
                "-w",
                out_wav_path,
                "--",
                unicodetext,
            ]
        )
        return True

    @staticmethod
    def rhvoice(out_wav_path, unicodetext):
        if not Text2Speech.valid_tts["RHVoice"]:
            return False

        tmp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        tmp_file.close()

        proc = subprocess.Popen(
            [
                "RHVoice",
                "--voice=Elena",
                "--variant=Russian",
                "--volume=100",
                "-o",
                tmp_file.name,
            ],
            stdin=subprocess.PIPE,
        )
        proc.communicate(input=unicodetext.encode("utf-8"))
        # make it a little bit louder to be comparable with pico2wave
        subprocess.call(["sox", tmp_file.name, out_wav_path, "norm"])

        os.remove(tmp_file.name)
        return True
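
# The Record machinery below is a small declarative serializer: each subclass
# fills self._struct with an OrderedDict mapping field name -> (struct format,
# default value), individual fields can be overridden via __setitem__, and
# construct() packs every field little-endian ("<" + fmt) in declaration order.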


class Record(object):

    def __init__(self, parent):
        self.parent = parent
        self._struct = collections.OrderedDict([])
        self._fields = {}
        self.track_voiceover = parent.track_voiceover
        self.playlist_voiceover = parent.playlist_voiceover
        self.rename = parent.rename
        self.trackgain = parent.trackgain

    def __getitem__(self, item):
        if item not in list(self._struct.keys()):
            raise KeyError
        return self._fields.get(item, self._struct[item][1])

    def __setitem__(self, item, value):
        self._fields[item] = value

    def construct(self):
        output = bytes()
        for i in list(self._struct.keys()):
            (fmt, default) = self._struct[i]
            output += struct.pack("<" + fmt, self._fields.get(i, default))
        return output

    def text_to_speech(self, text, dbid, playlist=False):
        if (
            self.track_voiceover
            and not playlist
            or self.playlist_voiceover
            and playlist
        ):
            # Create the voiceover wav file
            fn = "".join(format(x, "02x") for x in reversed(dbid))
            path = os.path.join(
                self.base,
                "iPod_Control",
                "Speakable",
                "Tracks" if not playlist else "Playlists",
                fn + ".wav",
            )
            return Text2Speech.text2speech(path, text)
        return False

    def path_to_ipod(self, filename):
        if os.path.commonprefix([os.path.abspath(filename), self.base]) != self.base:
            raise IOError(
                "Cannot get the iPod filename, since the file is outside the iPod path"
            )
        baselen = len(self.base)
        if self.base.endswith(os.path.sep):
            baselen -= 1
        ipodname = "/".join(os.path.abspath(filename)[baselen:].split(os.path.sep))
        return ipodname

    def ipod_to_path(self, ipodname):
        return os.path.abspath(
            os.path.join(self.base, os.path.sep.join(ipodname.split("/")))
        )

    @property
    def shuffledb(self):
        parent = self.parent
        while parent.__class__ != Shuffler:
            parent = parent.parent
        return parent

    @property
    def base(self):
        return self.shuffledb.path

    @property
    def tracks(self):
        return self.shuffledb.tracks

    @property
    def albums(self):
        return self.shuffledb.albums

    @property
    def artists(self):
        return self.shuffledb.artists

    @property
    def lists(self):
        return self.shuffledb.lists
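
# Rough iTunesSD layout as produced by the records below (see the wikispaces
# link near the end of this file): the 64-byte "bdhs" header comes first, the
# "hths" track header starts at offset 64 and is followed by one "rths" record
# per track, then the "hphs" playlist header and one "lphs" record per playlist.
# The offsets stored in the headers are computed in the construct() methods.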


class TunesSD(Record):
    def __init__(self, parent):
        Record.__init__(self, parent)
        self.track_header = TrackHeader(self)
        self.play_header = PlaylistHeader(self)
        self._struct = collections.OrderedDict(
            [
                ("header_id", ("4s", b"bdhs")),  # shdb
                ("unknown1", ("I", 0x02000003)),
                ("total_length", ("I", 64)),
                ("total_number_of_tracks", ("I", 0)),
                ("total_number_of_playlists", ("I", 0)),
                ("unknown2", ("Q", 0)),
                ("max_volume", ("B", 0)),
                ("voiceover_enabled", ("B", int(self.track_voiceover))),
                ("unknown3", ("H", 0)),
                ("total_tracks_without_podcasts", ("I", 0)),
                ("track_header_offset", ("I", 64)),
                ("playlist_header_offset", ("I", 0)),
                ("unknown4", ("20s", b"\x00" * 20)),
            ]
        )

    def construct(self):
        # The header is a fixed length, so no need to calculate it
        self.track_header.base_offset = 64
        track_header = self.track_header.construct()

        # The playlist offset will depend on the number of tracks
        self.play_header.base_offset = self.track_header.base_offset + len(track_header)
        play_header = self.play_header.construct(self.track_header.tracks)
        self["playlist_header_offset"] = self.play_header.base_offset

        self["total_number_of_tracks"] = self.track_header["number_of_tracks"]
        self["total_tracks_without_podcasts"] = self.track_header["number_of_tracks"]
        self["total_number_of_playlists"] = self.play_header["number_of_playlists"]

        output = Record.construct(self)
        return output + track_header + play_header


class TrackHeader(Record):
    def __init__(self, parent):
        self.base_offset = 0
        Record.__init__(self, parent)
        self._struct = collections.OrderedDict(
            [
                ("header_id", ("4s", b"hths")),  # shth
                ("total_length", ("I", 0)),
                ("number_of_tracks", ("I", 0)),
                ("unknown1", ("Q", 0)),
            ]
        )

    def construct(self):
        self["number_of_tracks"] = len(self.tracks)
        self["total_length"] = 20 + (len(self.tracks) * 4)
        output = Record.construct(self)

        # Construct the underlying tracks
        track_chunk = bytes()
        for i in self.tracks:
            track = Track(self)
            verboseprint("[*] Adding track", i)
            track.populate(i)
            output += struct.pack(
                "I", self.base_offset + self["total_length"] + len(track_chunk)
            )
            track_chunk += track.construct()
        return output + track_chunk


class Track(Record):

    def __init__(self, parent):
        Record.__init__(self, parent)
        self._struct = collections.OrderedDict(
            [
                ("header_id", ("4s", b"rths")),  # shtr
                ("header_length", ("I", 0x174)),
                ("start_at_pos_ms", ("I", 0)),
                ("stop_at_pos_ms", ("I", 0)),
                ("volume_gain", ("I", int(self.trackgain))),
                ("filetype", ("I", 1)),
                ("filename", ("256s", b"\x00" * 256)),
                ("bookmark", ("I", 0)),
                ("dontskip", ("B", 1)),
                ("remember", ("B", 0)),
                ("unintalbum", ("B", 0)),
                ("unknown", ("B", 0)),
                ("pregap", ("I", 0x200)),
                ("postgap", ("I", 0x200)),
                ("numsamples", ("I", 0)),
                ("unknown2", ("I", 0)),
                ("gapless", ("I", 0)),
                ("unknown3", ("I", 0)),
                ("albumid", ("I", 0)),
                ("track", ("H", 1)),
                ("disc", ("H", 0)),
                ("unknown4", ("Q", 0)),
                ("dbid", ("8s", 0)),
                ("artistid", ("I", 0)),
                ("unknown5", ("32s", b"\x00" * 32)),
            ]
        )

    def populate(self, filename):
        self["filename"] = self.path_to_ipod(filename).encode("utf-8")

        if os.path.splitext(filename)[1].lower() in (".m4a", ".m4b", ".m4p", ".aa"):
            self["filetype"] = 2

        text = os.path.splitext(os.path.basename(filename))[0]

        # Try to get album and artist information with mutagen
        if mutagen:
            audio = None
            try:
                audio = mutagen.File(filename, easy=True)
            except:
                print(
                    "Error calling mutagen. Possible invalid filename/ID3Tags (hyphen in filename?)"
                )
            if audio:
                # Note: the Rhythmbox iPod plugin always sets this value to 0.
                self["stop_at_pos_ms"] = int(audio.info.length * 1000)

                artist = audio.get("artist", ["Unknown"])[0]
                if artist in self.artists:
                    self["artistid"] = self.artists.index(artist)
                else:
                    self["artistid"] = len(self.artists)
                    self.artists.append(artist)

                album = audio.get("album", ["Unknown"])[0]
                if album in self.albums:
                    self["albumid"] = self.albums.index(album)
                else:
                    self["albumid"] = len(self.albums)
                    self.albums.append(album)

                if audio.get("title", "") and audio.get("artist", ""):
                    text = " - ".join(audio.get("title", "") + audio.get("artist", ""))

        # Handle the VoiceOverData
        if isinstance(text, str):
            text = text.encode("utf-8", "ignore")
        self["dbid"] = hashlib.md5(text).digest()[:8]
        self.text_to_speech(text, self["dbid"])
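
# Note on the dbid/voiceover pairing as implemented above: the 8-byte dbid is
# the first half of the MD5 digest of the spoken text, and
# Record.text_to_speech() names the generated .wav file after the reversed
# dbid bytes in hex, stored under iPod_Control/Speakable/Tracks (or /Playlists).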


class PlaylistHeader(Record):
    def __init__(self, parent):
        self.base_offset = 0
        Record.__init__(self, parent)
        self._struct = collections.OrderedDict(
            [
                ("header_id", ("4s", b"hphs")),  # shph
                ("total_length", ("I", 0)),
                ("number_of_playlists", ("I", 0)),
                ("number_of_non_podcast_lists", ("2s", b"\xFF\xFF")),
                ("number_of_master_lists", ("2s", b"\x01\x00")),
                ("number_of_non_audiobook_lists", ("2s", b"\xFF\xFF")),
                ("unknown2", ("2s", b"\x00" * 2)),
            ]
        )

    def construct(self, tracks):
        # Build the master list
        masterlist = Playlist(self)
        verboseprint("[+] Adding master playlist")
        masterlist.set_master(tracks)
        chunks = [masterlist.construct(tracks)]

        # Build all the remaining playlists
        playlistcount = 1
        for i in self.lists:
            playlist = Playlist(self)
            verboseprint("[+] Adding playlist", (i[0] if type(i) == type(()) else i))
            playlist.populate(i)
            construction = playlist.construct(tracks)
            if playlist["number_of_songs"] > 0:
                playlistcount += 1
                chunks += [construction]
            else:
                print(
                    "Error: Playlist does not contain any tracks. Skipping playlist."
                )

        self["number_of_playlists"] = playlistcount
        self["total_length"] = 0x14 + (self["number_of_playlists"] * 4)

        # Start the header
        output = Record.construct(self)
        offset = self.base_offset + self["total_length"]

        for i in range(len(chunks)):
            output += struct.pack("I", offset)
            offset += len(chunks[i])

        return output + b"".join(chunks)


class Playlist(Record):
    def __init__(self, parent):
        self.listtracks = []
        Record.__init__(self, parent)
        self._struct = collections.OrderedDict(
            [
                ("header_id", ("4s", b"lphs")),  # shpl
                ("total_length", ("I", 0)),
                ("number_of_songs", ("I", 0)),
                ("number_of_nonaudio", ("I", 0)),
                ("dbid", ("8s", b"\x00" * 8)),
                ("listtype", ("I", 2)),
                ("unknown1", ("16s", b"\x00" * 16)),
            ]
        )

    def set_master(self, tracks):
        # By default use "All Songs" builtin voiceover (dbid all zero)
        # Else generate alternative "All Songs" to fit the speaker voice of other playlists
        if self.playlist_voiceover and (
            Text2Speech.valid_tts["pico2wave"]
            or Text2Speech.valid_tts["espeak"]
            or Text2Speech.valid_tts["say"]
        ):
            self["dbid"] = hashlib.md5(b"masterlist").digest()[:8]
            self.text_to_speech("All songs", self["dbid"], True)
        self["listtype"] = 1
        self.listtracks = tracks

    def populate_m3u(self, data):
        listtracks = []
        for i in data:
            if not i.startswith("#"):
                path = i.strip()
                if self.rename:
                    path = validate_unicode(path)
                listtracks.append(path)
        return listtracks

    def populate_pls(self, data):
        sorttracks = []
        for i in data:
            dataarr = i.strip().split("=", 1)
            if dataarr[0].lower().startswith("file"):
                num = int(dataarr[0][4:])
                filename = urllib.parse.unquote(dataarr[1]).strip()
                if filename.lower().startswith("file://"):
                    filename = filename[7:]
                if self.rename:
                    filename = validate_unicode(filename)
                sorttracks.append((num, filename))
        listtracks = [x for (_, x) in sorted(sorttracks)]
        return listtracks
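
    # Input formats handled above (illustrative snippets):
    #   .m3u - one path per line; lines starting with "#" (e.g. "#EXTM3U") are skipped.
    #   .pls - "FileN=<path or file:// URL>" entries; the numeric suffix N fixes the
    #          order, and any "file://" prefix is stripped after URL-unquoting.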

    def populate_directory(self, playlistpath, recursive=True):
        # Add all tracks inside the folder and its subfolders recursively.
        # Folders containing no music and only a single Album
        # would generate duplicated playlists. That is intended and "won't fix".
        # Empty folders (inside the music path) will generate an error -> "won't fix".
        listtracks = []
        for dirpath, dirnames, filenames in os.walk(playlistpath):
            dirnames.sort()

            # Ignore any hidden directories
            if "/." not in dirpath:
                for filename in sorted(filenames, key=lambda x: x.lower()):
                    # Only add valid music files to the playlist
                    if os.path.splitext(filename)[1].lower() in (
                        ".mp3",
                        ".m4a",
                        ".m4b",
                        ".m4p",
                        ".aa",
                        ".wav",
                    ):
                        fullPath = os.path.abspath(os.path.join(dirpath, filename))
                        listtracks.append(fullPath)
            if not recursive:
                break
        return listtracks

    def remove_relatives(self, relative, filename):
        base = os.path.dirname(os.path.abspath(filename))
        if not os.path.exists(relative):
            relative = os.path.join(base, relative)
        fullPath = relative
        return fullPath

    def populate(self, obj):
        # Create a playlist of the folder and all subfolders
        if type(obj) == type(()):
            self.listtracks = obj[1]
            text = obj[0]
        else:
            filename = obj
            if os.path.isdir(filename):
                self.listtracks = self.populate_directory(filename)
                text = os.path.splitext(os.path.basename(filename))[0]
            else:
                # Read the playlist file
                with open(filename, "r", errors="replace") as f:
                    data = f.readlines()

                extension = os.path.splitext(filename)[1].lower()
                if extension == ".pls":
                    self.listtracks = self.populate_pls(data)
                elif extension == ".m3u":
                    self.listtracks = self.populate_m3u(data)
                else:
                    raise

                # Ensure all paths are not relative to the playlist file
                for i in range(len(self.listtracks)):
                    self.listtracks[i] = self.remove_relatives(
                        self.listtracks[i], filename
                    )
                text = os.path.splitext(os.path.basename(filename))[0]

        # Handle the VoiceOverData
        self["dbid"] = hashlib.md5(text.encode("utf-8")).digest()[:8]
        self.text_to_speech(text, self["dbid"], True)

    def construct(self, tracks):
        self["total_length"] = 44 + (4 * len(self.listtracks))
        self["number_of_songs"] = 0

        chunks = bytes()
        for i in self.listtracks:
            path = self.ipod_to_path(i)
            position = -1
            try:
                position = tracks.index(path)
            except:
                # Print an error if no track was found.
                # Empty playlists are handled in the PlaylistHeader class.
                print('Error: Could not find track "' + path + '".')
                print(
                    "Maybe it's an invalid FAT filesystem name. Please fix your playlist. Skipping track."
                )
            if position > -1:
                chunks += struct.pack("I", position)
                self["number_of_songs"] += 1
        self["number_of_nonaudio"] = self["number_of_songs"]

        output = Record.construct(self)
        return output + chunks


class Shuffler(object):
    def __init__(
        self,
        path,
        track_voiceover=False,
        playlist_voiceover=False,
        rename=False,
        trackgain=0,
        auto_dir_playlists=None,
        auto_id3_playlists=None,
    ):
        self.path = os.path.abspath(path)
        self.tracks = []
        self.albums = []
        self.artists = []
        self.lists = []
        self.tunessd = None
        self.track_voiceover = track_voiceover
        self.playlist_voiceover = playlist_voiceover
        self.rename = rename
        self.trackgain = trackgain
        self.auto_dir_playlists = auto_dir_playlists
        self.auto_id3_playlists = auto_id3_playlists

    def initialize(self):
        # remove existing voiceover files (they are either useless or will be overwritten anyway)
        for dirname in (
            "iPod_Control/Speakable/Playlists",
            "iPod_Control/Speakable/Tracks",
        ):
            shutil.rmtree(os.path.join(self.path, dirname), ignore_errors=True)
        for dirname in (
            "iPod_Control/iTunes",
            "iPod_Control/Music",
            "iPod_Control/Speakable/Playlists",
            "iPod_Control/Speakable/Tracks",
        ):
            make_dir_if_absent(os.path.join(self.path, dirname))

    def dump_state(self):
        print("Shuffle DB state")
        print("Tracks", self.tracks)
        print("Albums", self.albums)
        print("Artists", self.artists)
        print("Playlists", self.lists)

    def populate(self):
        self.tunessd = TunesSD(self)
        for dirpath, dirnames, filenames in os.walk(self.path):
            dirnames.sort()
            relpath = get_relpath(dirpath, self.path)
            # Ignore the speakable directory and any hidden directories
            if (
                not is_path_prefix("iPod_Control/Speakable", relpath)
                and "/." not in dirpath
            ):
                for filename in sorted(filenames, key=lambda x: x.lower()):
                    # Ignore hidden files
                    if not filename.startswith("."):
                        fullPath = os.path.abspath(os.path.join(dirpath, filename))
                        if os.path.splitext(filename)[1].lower() in (
                            ".mp3",
                            ".m4a",
                            ".m4b",
                            ".m4p",
                            ".aa",
                            ".wav",
                        ):
                            self.tracks.append(fullPath)
                        if os.path.splitext(filename)[1].lower() in (".pls", ".m3u"):
                            self.lists.append(fullPath)

            # Create automatic playlists in the music directory.
            # Ignore the (music) root and any hidden directories.
            if (
                self.auto_dir_playlists
                and "iPod_Control/Music/" in dirpath
                and "/." not in dirpath
            ):
                # Only go to a specific depth. -1 is unlimited, 0 is ignored as there is already a master playlist.
                depth = (
                    dirpath[len(self.path) + len(os.path.sep) :].count(os.path.sep) - 1
                )
                if self.auto_dir_playlists < 0 or depth <= self.auto_dir_playlists:
                    self.lists.append(os.path.abspath(dirpath))

        if self.auto_id3_playlists is not None:
            if mutagen:
                for grouped_list in group_tracks_by_id3_template(
                    self.tracks, self.auto_id3_playlists
                ):
                    self.lists.append(grouped_list)
            else:
                print("Error: No mutagen found. Cannot generate auto-id3-playlists.")
                sys.exit(1)

    def write_database(self):
        print("Writing database. This may take a while...")
        with open(
            os.path.join(self.path, "iPod_Control", "iTunes", "iTunesSD"), "wb"
        ) as f:
            try:
                f.write(self.tunessd.construct())
            except IOError as e:
                print("I/O error({0}): {1}".format(e.errno, e.strerror))
                print("Error: Writing iPod database failed.")
                sys.exit(1)

        print("Database written successfully:")
        print("Tracks", len(self.tracks))
        print("Albums", len(self.albums))
        print("Artists", len(self.artists))
        print("Playlists", len(self.lists))


#
# Read all files from the directory
# Construct the appropriate iTunesDB file
# Construct the appropriate iTunesSD file
# http://shuffle3db.wikispaces.com/iTunesSD3gen
# Use SVOX pico2wave and RHVoice to produce voiceover data
#


def check_unicode(path):
    ret_flag = False  # True if there is a recognizable file within this level
    for item in os.listdir(path):
        if os.path.isfile(os.path.join(path, item)):
            if os.path.splitext(item)[1].lower() in audio_ext + list_ext:
                ret_flag = True
                if raises_unicode_error(item):
                    src = os.path.join(path, item)
                    dest = (
                        os.path.join(path, hash_error_unicode(item))
                        + os.path.splitext(item)[1].lower()
                    )
                    print("Renaming %s -> %s" % (src, dest))
                    os.rename(src, dest)
        else:
            ret_flag = check_unicode(os.path.join(path, item)) or ret_flag
            if ret_flag and raises_unicode_error(item):
                src = os.path.join(path, item)
                new_name = hash_error_unicode(item)
                dest = os.path.join(path, new_name)
                print("Renaming %s -> %s" % (src, dest))
                os.rename(src, dest)
    return ret_flag


def nonnegative_int(string):
    try:
        intval = int(string)
    except ValueError:
        raise argparse.ArgumentTypeError("'%s' must be an integer" % string)

    if intval < 0 or intval > 99:
        raise argparse.ArgumentTypeError("Track gain value should be in range 0-99")
    return intval


def checkPathValidity(path):
    if not os.path.isdir(path):
        print("Error finding the iPod directory. Maybe it is not connected or mounted?")
        sys.exit(1)

    if not os.access(path, os.W_OK):
        print("Unable to get write permissions in the iPod directory")
        sys.exit(1)


def handle_interrupt(signal, frame):
    print("Interrupt detected, exiting...")
    sys.exit(1)


if __name__ == "__main__":
    signal.signal(signal.SIGINT, handle_interrupt)

    parser = argparse.ArgumentParser(
        description="Python script for building the Track and Playlist database "
        "for the newer gen iPod Shuffle. Version 1.5"
    )

    parser.add_argument(
        "-t",
        "--track-voiceover",
        action="store_true",
        help="Enable track voiceover feature",
    )

    parser.add_argument(
        "-p",
        "--playlist-voiceover",
        action="store_true",
        help="Enable playlist voiceover feature",
    )

    parser.add_argument(
        "-u",
        "--rename-unicode",
        action="store_true",
        help="Rename files causing unicode errors; will do the minimal required renaming",
    )

    parser.add_argument(
        "-g",
        "--track-gain",
        type=nonnegative_int,
        default="0",
        help="Specify volume gain (0-99) for all tracks; "
        "0 (default) means no gain and is usually fine; "
        "e.g. 60 is very loud even on minimal player volume",
    )

    parser.add_argument(
        "-d",
        "--auto-dir-playlists",
        type=int,
        default=None,
        const=-1,
        nargs="?",
        help="Generate automatic playlists for each folder recursively inside "
        '"iPod_Control/Music/". You can optionally limit the depth: '
        "0=root, 1=artist, 2=album, n=subfoldername, default=-1 (no limit).",
    )

    parser.add_argument(
        "-i",
        "--auto-id3-playlists",
        type=str,
        default=None,
        metavar="ID3_TEMPLATE",
        const="{artist}",
        nargs="?",
        help="Generate automatic playlists based on the ID3 tags of any music "
        "added to the iPod. You can optionally specify a template string "
        "based on which ID3 tags are used to generate playlists. For example, "
        "'{artist} - {album}' will use the pair of artist and album to group "
        "tracks under one playlist. Similarly '{genre}' will group tracks based "
        "on their genre tag. The default template is '{artist}'",
    )

    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Show verbose output of database generation.",
    )

    parser.add_argument("path", help="Path to the iPod's root directory")

    result = parser.parse_args()

    # Enable verbose printing if desired
    verboseprint = print if result.verbose else lambda *a, **k: None

    checkPathValidity(result.path)

    if result.rename_unicode:
        check_unicode(result.path)

    if not mutagen:
        print(
            "Warning: No mutagen found. The database will not contain any album or artist information."
        )

    verboseprint("Playlist voiceover requested:", result.playlist_voiceover)
    verboseprint("Track voiceover requested:", result.track_voiceover)
    if result.track_voiceover or result.playlist_voiceover:
        if not Text2Speech.check_support():
            print("Error: Did not find any voiceover program. Voiceover disabled.")
            result.track_voiceover = False
            result.playlist_voiceover = False
        else:
            verboseprint("Voiceover available.")

    shuffle = Shuffler(
        result.path,
        track_voiceover=result.track_voiceover,
        playlist_voiceover=result.playlist_voiceover,
        rename=result.rename_unicode,
        trackgain=result.track_gain,
        auto_dir_playlists=result.auto_dir_playlists,
        auto_id3_playlists=result.auto_id3_playlists,
    )
    shuffle.initialize()
    shuffle.populate()
    shuffle.write_database()