MediaCMS backend, initial commit

This commit is contained in:
Markos Gogoulos
2020-12-15 23:33:43 +02:00
commit 75430de2e0
85 changed files with 10558 additions and 0 deletions

0
files/__init__.py Normal file
View File

87
files/admin.py Normal file
View File

@@ -0,0 +1,87 @@
from django.contrib import admin
from .models import (
Media,
Encoding,
EncodeProfile,
Category,
Comment,
Tag,
Language,
Subtitle,
)
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for Comment objects."""

    search_fields = ["text"]
    list_display = ["text", "add_date", "user", "media"]
    ordering = ("-add_date",)
    # set when the comment is created; not editable through the admin
    readonly_fields = ("user", "media", "parent")
class MediaAdmin(admin.ModelAdmin):
    """Admin configuration for Media objects."""

    search_fields = ["title"]
    list_display = [
        "title",
        "user",
        "add_date",
        "media_type",
        "duration",
        "state",
        "is_reviewed",
        "encoding_status",
        "featured",
        "get_comments_count",
    ]
    list_filter = ["state", "is_reviewed", "encoding_status", "featured", "category"]
    ordering = ("-add_date",)
    readonly_fields = ("user", "tags", "category", "channel")

    def get_comments_count(self, obj):
        # extra list column: number of comments attached to this media
        return obj.comments.count()

    get_comments_count.short_description = "Comments count"
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for Category objects."""

    search_fields = ["title"]
    list_display = ["title", "user", "add_date", "is_global", "media_count"]
    list_filter = ["is_global"]
    ordering = ("-add_date",)
    # media_count is maintained elsewhere; not editable here
    readonly_fields = ("user", "media_count")
class TagAdmin(admin.ModelAdmin):
    """Admin configuration for Tag objects."""

    search_fields = ["title"]
    list_display = ["title", "user", "media_count"]
    readonly_fields = ("user", "media_count")
class EncodeProfileAdmin(admin.ModelAdmin):
    """Admin configuration for EncodeProfile objects (encoding targets)."""

    list_display = ("name", "extension", "resolution", "codec", "description", "active")
    list_filter = ["extension", "resolution", "codec", "active"]
    search_fields = ["name", "extension", "resolution", "codec", "description"]
    list_per_page = 100
    fields = ("name", "extension", "resolution", "codec", "description", "active")
class LanguageAdmin(admin.ModelAdmin):
    """Default admin for Language objects."""

    pass
class SubtitleAdmin(admin.ModelAdmin):
    """Default admin for Subtitle objects."""

    pass
class EncodingAdmin(admin.ModelAdmin):
    """Default admin for Encoding objects."""

    pass
# Wire each model to its admin configuration.
admin.site.register(EncodeProfile, EncodeProfileAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Media, MediaAdmin)
admin.site.register(Encoding, EncodingAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Tag, TagAdmin)
admin.site.register(Subtitle, SubtitleAdmin)
admin.site.register(Language, LanguageAdmin)

5
files/apps.py Normal file
View File

@@ -0,0 +1,5 @@
from django.apps import AppConfig
class FilesConfig(AppConfig):
    """Django application configuration for the `files` app."""

    name = "files"

77
files/backends.py Normal file
View File

@@ -0,0 +1,77 @@
# ffmpeg only backend
from subprocess import PIPE, Popen
import locale
import re
import logging
logger = logging.getLogger(__name__)
class VideoEncodingError(Exception):
    """Raised when ffmpeg fails to spawn or exits with a non-zero code."""

    def __init__(self, *args, **kwargs):
        # Keep the convenience `.message` attribute, but guard against being
        # raised with no arguments (the original indexed args[0] blindly,
        # turning `raise VideoEncodingError()` into an IndexError).
        self.message = args[0] if args else ""
        super(VideoEncodingError, self).__init__(*args, **kwargs)
# Matches ffmpeg's stderr progress lines, e.g. "time=00:01:23.45".
# The dot before the fractional seconds is now escaped — the original
# pattern used a bare "." which matched any character (e.g. "23x45").
RE_TIMECODE = re.compile(r"time=(\d+:\d+:\d+\.\d+)")
# Encoding used to decode ffmpeg console output.
# NOTE(review): locale.getdefaultlocale() is deprecated since Python 3.11;
# consider locale.getpreferredencoding(False) when upgrading.
console_encoding = locale.getdefaultlocale()[1] or "UTF-8"
class FFmpegBackend(object):
    """Thin wrapper around the ffmpeg binary.

    `encode` is a generator: it spawns ffmpeg, yields progress timecodes
    while the process runs, and finally yields the tail of the output.
    """

    name = "FFmpeg"

    def __init__(self):
        pass

    def _spawn(self, cmd):
        # Start ffmpeg with fully piped stdio; wrap OS-level failures
        # (e.g. binary not found) in VideoEncodingError.
        try:
            return Popen(
                cmd,
                shell=False,
                stdin=PIPE,
                stdout=PIPE,
                stderr=PIPE,
                close_fds=True,
            )
        except OSError as e:
            raise VideoEncodingError("Error while running ffmpeg", e)

    def _check_returncode(self, process):
        # Wait for the process to finish and report its exit code.
        ret = {}
        stdout, stderr = process.communicate()
        ret["code"] = process.returncode
        return ret

    def encode(self, cmd):
        """Run `cmd` (an ffmpeg argv list), yielding progress timecodes.

        Raises VideoEncodingError when ffmpeg exits non-zero or produces
        no output at all.
        """
        process = self._spawn(cmd)
        buf = output = ""
        while True:
            # ffmpeg writes progress to stderr; read in small chunks
            out = process.stderr.read(10)
            if not out:
                break
            try:
                out = out.decode(console_encoding)
            except UnicodeDecodeError:
                out = ""
            # keep only the tail of the streams to bound memory usage
            output = output[-500:] + out
            buf = buf[-500:] + out
            try:
                # progress lines are terminated with carriage returns
                line, buf = buf.split("\r", 1)
            except BaseException:
                continue
            progress = RE_TIMECODE.findall(line)
            if progress:
                progress = progress[0]
                yield progress
        process_check = self._check_returncode(process)
        if process_check["code"] != 0:
            raise VideoEncodingError(output[-1000:])  # output could be huge
        if not output:
            raise VideoEncodingError("No output from FFmpeg.")
        yield output[-1000:]  # output could be huge

View File

@@ -0,0 +1,40 @@
from django.conf import settings
from .methods import is_mediacms_editor, is_mediacms_manager
def stuff(request):
    """Context processor exposing portal settings and the current
    user's capabilities to templates/frontend."""
    # settings whose context key is identical to the setting name
    mirrored = (
        "DEFAULT_THEME",
        "PORTAL_NAME",
        "LOAD_FROM_CDN",
        "CAN_LIKE_MEDIA",
        "CAN_DISLIKE_MEDIA",
        "CAN_REPORT_MEDIA",
        "CAN_SHARE_MEDIA",
        "UPLOAD_MAX_SIZE",
        "UPLOAD_MAX_FILES_NUMBER",
        "PRE_UPLOAD_MEDIA_MESSAGE",
        "POST_UPLOAD_AUTHOR_MESSAGE_UNLISTED_NO_COMMENTARY",
        "ALLOW_RATINGS",
        "ALLOW_RATINGS_CONFIRMED_EMAIL_ONLY",
        "VIDEO_PLAYER_FEATURED_VIDEO_ON_INDEX_PAGE",
    )
    ret = {name: getattr(settings, name) for name in mirrored}
    # when the session is https, hand the frontend the https host
    # so that the links it builds use https too
    if request.is_secure():
        ret["FRONTEND_HOST"] = settings.SSL_FRONTEND_HOST
    else:
        ret["FRONTEND_HOST"] = settings.FRONTEND_HOST
    # settings whose context key differs from the setting name
    ret["CAN_LOGIN"] = settings.LOGIN_ALLOWED
    ret["CAN_REGISTER"] = settings.REGISTER_ALLOWED
    ret["CAN_UPLOAD_MEDIA"] = settings.UPLOAD_MEDIA_ALLOWED
    # per-request user capabilities
    ret["IS_MEDIACMS_ADMIN"] = request.user.is_superuser
    ret["IS_MEDIACMS_EDITOR"] = is_mediacms_editor(request.user)
    ret["IS_MEDIACMS_MANAGER"] = is_mediacms_manager(request.user)
    return ret

4
files/exceptions.py Normal file
View File

@@ -0,0 +1,4 @@
class VideoEncodingError(Exception):
    """Raised when a video encoding run fails."""

    def __init__(self, *args, **kwargs):
        # Guard against construction with no arguments: the original
        # indexed args[0] unconditionally, raising IndexError instead.
        self.message = args[0] if args else ""
        super(VideoEncodingError, self).__init__(*args, **kwargs)

26
files/feeds.py Normal file
View File

@@ -0,0 +1,26 @@
from django.contrib.syndication.views import Feed
from django.urls import reverse
from django.db.models import Q
from .models import Media
class RssMediaFeed(Feed):
    """RSS feed with the latest 40 listable media, newest first."""

    title = "Latest Media"
    link = "/media"
    description = "Latest Media RSS feed"

    def items(self):
        # only media that may appear on public listings
        basic_query = Q(listable=True)
        media = Media.objects.filter(basic_query).order_by("-add_date")
        media = media.prefetch_related("user")
        return media[:40]

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        return item.description

    def item_link(self, item):
        # media items are addressed by friendly token, e.g. ...?m=<token>
        return reverse("get_media") + "?m={0}".format(item.friendly_token)

95
files/forms.py Normal file
View File

@@ -0,0 +1,95 @@
from django import forms
from .models import Media, Subtitle
from .methods import is_mediacms_editor, get_next_state
class MultipleSelect(forms.CheckboxSelectMultiple):
    """Checkbox-based widget for multi-select form fields."""

    input_type = "checkbox"
class MediaForm(forms.ModelForm):
    """Form for editing Media objects.

    Moderation-only fields (featured, reported_times, is_reviewed) are
    hidden from non-editor users, and thumbnail_time is only shown for
    videos. State transitions are validated against the portal workflow.
    """

    # free-text field; tags are entered as a comma separated string
    new_tags = forms.CharField(
        label="Tags", help_text="a comma separated list of new tags.", required=False
    )

    class Meta:
        model = Media
        fields = (
            "title",
            "category",
            "new_tags",
            "add_date",
            "uploaded_poster",
            "description",
            "state",
            "enable_comments",
            "featured",
            "thumbnail_time",
            "reported_times",
            "is_reviewed",
        )
        widgets = {
            "tags": MultipleSelect(),
        }

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(MediaForm, self).__init__(*args, **kwargs)
        # thumbnail selection only makes sense for videos
        if self.instance.media_type != "video":
            self.fields.pop("thumbnail_time")
        # plain users do not see moderation fields
        if not is_mediacms_editor(user):
            self.fields.pop("featured")
            self.fields.pop("reported_times")
            self.fields.pop("is_reviewed")
        # pre-fill the text field with the media's current tags
        self.fields["new_tags"].initial = ", ".join(
            [tag.title for tag in self.instance.tags.all()]
        )

    def clean_uploaded_poster(self):
        # reject poster images larger than 5MB
        image = self.cleaned_data.get("uploaded_poster", False)
        if image:
            if image.size > 5 * 1024 * 1024:
                raise forms.ValidationError("Image file too large ( > 5mb )")
        return image

    def save(self, *args, **kwargs):
        data = self.cleaned_data
        state = data.get("state")
        # run any state change through the workflow transition rules
        if state != self.initial["state"]:
            self.instance.state = get_next_state(
                self.user, self.initial["state"], self.instance.state
            )
        media = super(MediaForm, self).save(*args, **kwargs)
        return media
class SubtitleForm(forms.ModelForm):
    """Form for uploading a subtitle file for a given media item."""

    class Meta:
        model = Subtitle
        fields = ["language", "subtitle_file"]

    def __init__(self, media_item, *args, **kwargs):
        super(SubtitleForm, self).__init__(*args, **kwargs)
        # bind the subtitle to the media it was uploaded for
        self.instance.media = media_item

    def save(self, *args, **kwargs):
        # the subtitle is attributed to the media's owner
        self.instance.user = self.instance.media.user
        media = super(SubtitleForm, self).save(*args, **kwargs)
        return media
class ContactForm(forms.Form):
    """Site contact form.

    Name and email fields are asked only for anonymous visitors;
    authenticated users are already identified.
    """

    from_email = forms.EmailField(required=True)
    name = forms.CharField(required=False)
    message = forms.CharField(widget=forms.Textarea, required=True)

    def __init__(self, user, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        self.fields["name"].label = "Your name:"
        self.fields["from_email"].label = "Your email:"
        self.fields["message"].label = "Please add your message here and submit:"
        self.user = user
        if user.is_authenticated:
            self.fields.pop("name")
            self.fields.pop("from_email")

754
files/helpers.py Normal file
View File

@@ -0,0 +1,754 @@
# Kudos to Werner Robitza, AVEQ GmbH, for helping with ffmpeg
# related content
import hashlib
import json
import math
import os
import random
import shutil
import string
import subprocess
import tempfile
from fractions import Fraction

import filetype
from django.conf import settings
# Alphabet used for friendly tokens (equivalent to ascii letters + digits).
CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
CRF_ENCODING_NUM_SECONDS = 2  # 0 * 60 # videos with greater duration will get
# CRF encoding and not two-pass
# Encoding individual chunks may yield quality variations if you use a
# too low bitrate, so if you go for the chunk-based variant
# you should use CRF encoding.
MAX_RATE_MULTIPLIER = 1.5
BUF_SIZE_MULTIPLIER = 1.5
# in seconds, anything between 2 and 6 makes sense
KEYFRAME_DISTANCE = 4
KEYFRAME_DISTANCE_MIN = 2
# speed presets
# see https://trac.ffmpeg.org/wiki/Encode/H.264
X26x_PRESET = "medium"  # "medium"
X265_PRESET = "medium"
X26x_PRESET_BIG_HEIGHT = "faster"
# VP9_SPEED = 1 # between 0 and 4, lower is slower
VP9_SPEED = 2
# CRF (constant rate factor) per codec; lower means higher quality
VIDEO_CRFS = {
    "h264_baseline": 23,
    "h264": 23,
    "h265": 28,
    "vp9": 32,
}
# video rates for 25 or 60 fps input, for different codecs, in kbps
# keyed by codec -> fps bucket -> output height
VIDEO_BITRATES = {
    "h264": {
        25: {
            240: 300,
            360: 500,
            480: 1000,
            720: 2500,
            1080: 4500,
            1440: 9000,
            2160: 18000,
        },
        60: {720: 3500, 1080: 7500, 1440: 18000, 2160: 40000},
    },
    "h265": {
        25: {
            240: 150,
            360: 275,
            480: 500,
            720: 1024,
            1080: 1800,
            1440: 4500,
            2160: 10000,
        },
        60: {720: 1800, 1080: 3000, 1440: 8000, 2160: 18000},
    },
    "vp9": {
        25: {
            240: 150,
            360: 275,
            480: 500,
            720: 1024,
            1080: 1800,
            1440: 4500,
            2160: 10000,
        },
        60: {720: 1800, 1080: 3000, 1440: 8000, 2160: 18000},
    },
}
# audio settings and container extensions per video codec
AUDIO_ENCODERS = {"h264": "aac", "h265": "aac", "vp9": "libopus"}
AUDIO_BITRATES = {"h264": 128, "h265": 128, "vp9": 96}
EXTENSIONS = {"h264": "mp4", "h265": "mp4", "vp9": "webm"}
VIDEO_PROFILES = {"h264": "main", "h265": "main"}
def get_portal_workflow():
    """Return the portal-wide workflow setting (e.g. public/private/unlisted)."""
    return settings.PORTAL_WORKFLOW
def get_default_state(user=None):
    """Return the default media state implied by the portal workflow.

    On "private_verified" portals, advanced users default to "unlisted";
    everyone else defaults to "private".
    """
    workflow = settings.PORTAL_WORKFLOW
    state = "private"
    if workflow == "public":
        state = "public"
    elif workflow == "unlisted":
        state = "unlisted"
    elif workflow == "private_verified":
        if user and user.advancedUser:
            state = "unlisted"
    return state
def get_file_name(filename):
    """Return the last component of a slash-separated path."""
    return filename.rsplit("/", 1)[-1]
def get_file_type(filename):
    """Classify a file as video/image/audio/pdf from its magic bytes.

    Returns None for missing files and for types the filetype library
    does not recognize.
    """
    if not os.path.exists(filename):
        return None
    kind = filetype.guess(filename)
    if kind is None:
        # TODO: do something for files not supported by filetype lib
        return None
    mime = kind.mime
    if mime.startswith("video"):
        return "video"
    if mime.startswith("image"):
        return "image"
    if mime.startswith("audio"):
        return "audio"
    if "pdf" in mime:
        return "pdf"
    return None
def rm_file(filename):
    """Delete a single file; return True on success, False otherwise."""
    if not os.path.isfile(filename):
        return False
    try:
        os.remove(filename)
    except OSError:
        return False
    return True
def rm_files(filenames):
    """Best-effort delete of every file in a list.

    Returns True when given a list; any other input is ignored
    (returns None), matching the original contract.
    """
    if not isinstance(filenames, list):
        return None
    for name in filenames:
        rm_file(name)
    return True
def rm_dir(directory):
    """Recursively delete a directory, but only inside the project tree.

    Refuses to touch anything outside settings.BASE_DIR; returns True
    only when the tree was actually removed.
    """
    if os.path.isdir(directory) and directory.startswith(settings.BASE_DIR):
        try:
            shutil.rmtree(directory)
            return True
        except (FileNotFoundError, PermissionError):
            pass
    return False
def url_from_path(filename):
    """Map an absolute media path to its public MEDIA_URL url."""
    # TODO: find a way to preserve http - https ...
    relative = filename.replace(settings.MEDIA_ROOT, "")
    return "{0}{1}".format(settings.MEDIA_URL, relative)
def create_temp_file(suffix=None, dir=None):
    """Create a persistent named temporary file and return its path.

    The caller is responsible for deleting the file. `dir` defaults to
    settings.TEMP_DIRECTORY, resolved at call time — the original used it
    as the literal default value, which froze the setting at import time
    and required configured Django settings just to import this module.
    """
    if dir is None:
        dir = settings.TEMP_DIRECTORY
    tf = tempfile.NamedTemporaryFile(delete=False, suffix=suffix, dir=dir)
    return tf.name
def create_temp_dir(suffix=None, dir=None):
    """Create a temporary directory and return its path.

    `dir` defaults to settings.TEMP_DIRECTORY, resolved at call time
    instead of at import time (see create_temp_file).
    NOTE(review): `suffix` is accepted but not forwarded to mkdtemp,
    matching the original behavior — confirm whether that is intended.
    """
    if dir is None:
        dir = settings.TEMP_DIRECTORY
    td = tempfile.mkdtemp(dir=dir)
    return td
def produce_friendly_token(token_len=None):
    """Return a random token of `token_len` characters from [a-zA-Z0-9].

    `token_len` defaults to settings.FRIENDLY_TOKEN_LEN, resolved at call
    time — the original default froze the setting value at import time.
    The character-by-character while loop is replaced by choice/join.
    """
    if token_len is None:
        token_len = settings.FRIENDLY_TOKEN_LEN
    alphabet = string.ascii_letters + string.digits  # same set as CHARS
    return "".join(random.choice(alphabet) for _ in range(token_len))
def clean_friendly_token(token):
    """Strip every character that is not in the token alphabet.

    Fixes a real bug: the original called `token.replace(char, "")`
    without using the return value — strings are immutable, so invalid
    characters were never actually removed and the token was returned
    unchanged.
    """
    allowed = set(string.ascii_letters + string.digits)  # same set as CHARS
    return "".join(char for char in token if char in allowed)
def mask_ip(ip_address):
    """Return the hex md5 digest of an IP address, pseudonymizing it."""
    digest = hashlib.md5(ip_address.encode("utf-8"))
    return digest.hexdigest()
def run_command(cmd, cwd=None):
    """
    Run a command directly, returning a dict of decoded output.

    "error" always carries stderr; "out" (stdout) is present only when
    the command exits with status 0. Decoding failures yield "".
    """
    if isinstance(cmd, str):
        cmd = cmd.split()

    def _decode(raw):
        # TODO: catch unicodedecodeerrors here...
        try:
            return raw.decode("utf-8")
        except BaseException:
            return ""

    popen_kwargs = {"stdout": subprocess.PIPE, "stderr": subprocess.PIPE}
    if cwd:
        popen_kwargs["cwd"] = cwd
    process = subprocess.Popen(cmd, **popen_kwargs)
    stdout, stderr = process.communicate()
    ret = {"error": _decode(stderr)}
    if process.returncode == 0:
        ret["out"] = _decode(stdout)
    return ret
def media_file_info(input_file):
    """
    Get the info about an input file, as determined by ffprobe
    Returns a dict, with the keys:
    - `filename`: Filename
    - `file_size`: Size of the file in bytes
    - `video_duration`: Duration of the video in `s.msec`
    - `video_frame_rate`: Framerate in Hz
    - `video_bitrate`: Bitrate of the video stream in kBit/s
    - `video_width`: Width in pixels
    - `video_height`: Height in pixels
    - `video_codec`: Video codec
    - `audio_duration`: Duration of the audio in `s.msec`
    - `audio_sample_rate`: Audio sample rate in Hz
    - `audio_codec`: Audio codec name (`aac`)
    - `audio_bitrate`: Bitrate of the audio stream in kBit/s
    Also returns the video and audio info raw from ffprobe.
    On any failure a dict with `fail: True` is returned instead.
    """
    ret = {}
    if not os.path.isfile(input_file):
        ret["fail"] = True
        return ret
    video_info = {}
    audio_info = {}
    # file size in bytes via stat(1)
    cmd = ["stat", "-c", "%s", input_file]
    stdout = run_command(cmd).get("out")
    if stdout:
        file_size = int(stdout.strip())
    else:
        ret["fail"] = True
        return ret
    # md5 checksum is best-effort; empty string when md5sum fails
    cmd = ["md5sum", input_file]
    stdout = run_command(cmd).get("out")
    if stdout:
        md5sum = stdout.split()[0]
    else:
        md5sum = ""
    # probe all streams plus the container format name
    cmd = [
        settings.FFPROBE_COMMAND,
        "-loglevel",
        "error",
        "-show_streams",
        "-show_entries",
        "format=format_name",
        "-of",
        "json",
        input_file,
    ]
    stdout = run_command(cmd).get("out")
    try:
        info = json.loads(stdout)
    except TypeError:
        # stdout is None when ffprobe failed
        ret["fail"] = True
        return ret
    has_video = False
    has_audio = False
    for stream_info in info["streams"]:
        if stream_info["codec_type"] == "video":
            video_info = stream_info
            has_video = True
            # reject containers ffprobe reports as "video" but that are
            # really stills or text (png, gif, tty, ...)
            if info.get("format") and info["format"].get("format_name", "") in [
                "tty",
                "image2",
                "image2pipe",
                "bin",
                "png_pipe",
                "gif",
            ]:
                ret["fail"] = True
                return ret
        elif stream_info["codec_type"] == "audio":
            audio_info = stream_info
            has_audio = True
    if not has_video:
        # audio-only file: return early with just the audio info
        ret["is_video"] = False
        ret["is_audio"] = has_audio
        ret["audio_info"] = audio_info
        return ret
    # video duration: stream field, then DURATION tag, then format field
    if "duration" in video_info.keys():
        video_duration = float(video_info["duration"])
    elif "tags" in video_info.keys() and "DURATION" in video_info["tags"]:
        duration_str = video_info["tags"]["DURATION"]
        # DURATION tag looks like HH:MM:SS.msec (or with a comma)
        try:
            hms, msec = duration_str.split(".")
        except ValueError:
            hms, msec = duration_str.split(",")
        total_dur = sum(
            int(x) * 60 ** i for i, x in enumerate(reversed(hms.split(":")))
        )
        video_duration = total_dur + float("0." + msec)
    else:
        # fallback to format, eg for webm
        cmd = [
            settings.FFPROBE_COMMAND,
            "-loglevel",
            "error",
            "-show_format",
            "-of",
            "json",
            input_file,
        ]
        stdout = run_command(cmd).get("out")
        format_info = json.loads(stdout)["format"]
        try:
            video_duration = float(format_info["duration"])
        except KeyError:
            ret["fail"] = True
            return ret
    # video bitrate: stream field, else computed from total packet size
    if "bit_rate" in video_info.keys():
        video_bitrate = round(float(video_info["bit_rate"]) / 1024.0, 2)
    else:
        cmd = [
            settings.FFPROBE_COMMAND,
            "-loglevel",
            "error",
            "-select_streams",
            "v",
            "-show_entries",
            "packet=size",
            "-of",
            "compact=p=0:nk=1",
            input_file,
        ]
        stdout = run_command(cmd).get("out")
        stream_size = sum([int(l) for l in stdout.split("\n") if l != ""])
        video_bitrate = round((stream_size * 8 / 1024.0) / video_duration, 2)
    ret = {
        "filename": input_file,
        "file_size": file_size,
        "video_duration": video_duration,
        "video_frame_rate": float(Fraction(video_info["r_frame_rate"])),
        "video_bitrate": video_bitrate,
        "video_width": video_info["width"],
        "video_height": video_info["height"],
        "video_codec": video_info["codec_name"],
        "has_video": has_video,
        "has_audio": has_audio,
    }
    if has_audio:
        # same three-way duration fallback as for the video stream
        audio_duration = 1
        if "duration" in audio_info.keys():
            audio_duration = float(audio_info["duration"])
        elif "tags" in audio_info.keys() and "DURATION" in audio_info["tags"]:
            duration_str = audio_info["tags"]["DURATION"]
            try:
                hms, msec = duration_str.split(".")
            except ValueError:
                hms, msec = duration_str.split(",")
            total_dur = sum(
                int(x) * 60 ** i for i, x in enumerate(reversed(hms.split(":")))
            )
            audio_duration = total_dur + float("0." + msec)
        else:
            # fallback to format, eg for webm
            cmd = [
                settings.FFPROBE_COMMAND,
                "-loglevel",
                "error",
                "-show_format",
                "-of",
                "json",
                input_file,
            ]
            stdout = run_command(cmd).get("out")
            format_info = json.loads(stdout)["format"]
            audio_duration = float(format_info["duration"])
        if "bit_rate" in audio_info.keys():
            audio_bitrate = round(float(audio_info["bit_rate"]) / 1024.0, 2)
        else:
            # fall back to calculating from accumulated frame duration
            cmd = [
                settings.FFPROBE_COMMAND,
                "-loglevel",
                "error",
                "-select_streams",
                "a",
                "-show_entries",
                "packet=size",
                "-of",
                "compact=p=0:nk=1",
                input_file,
            ]
            stdout = run_command(cmd).get("out")
            stream_size = sum([int(l) for l in stdout.split("\n") if l != ""])
            audio_bitrate = round((stream_size * 8 / 1024.0) / audio_duration, 2)
        ret.update(
            {
                "audio_duration": audio_duration,
                "audio_sample_rate": audio_info["sample_rate"],
                "audio_codec": audio_info["codec_name"],
                "audio_bitrate": audio_bitrate,
                "audio_channels": audio_info["channels"],
            }
        )
    ret["video_info"] = video_info
    ret["audio_info"] = audio_info
    ret["is_video"] = True
    ret["md5sum"] = md5sum
    return ret
def calculate_seconds(duration):
    """Convert an "HH:MM:SS" string (as emitted by ffmpeg) to seconds.

    Returns 0 for anything that is not a three-part time string.
    """
    if not isinstance(duration, str):
        return 0
    parts = duration.split(":")
    if len(parts) != 3:
        return 0
    hours, minutes, seconds = (int(float(p)) for p in parts)
    return hours * 3600 + minutes * 60 + seconds
def show_file_size(size):
    """Format a byte count as a decimal-megabyte string, e.g. "1.5MB".

    Falsy input (0 / None) is returned unchanged.
    """
    if not size:
        return size
    megabytes = round(size / 1000000, 1)
    return "{0}MB".format(megabytes)
def get_base_ffmpeg_command(
    input_file,
    output_file,
    has_audio,
    codec,
    encoder,
    audio_encoder,
    target_fps,
    target_height,
    target_rate,
    target_rate_audio,
    pass_file,
    pass_number,
    enc_type,
    chunk,
):
    """Get the base command for a specific codec, height/rate, and pass
    Arguments:
        input_file {str} -- input file name
        output_file {str} -- output file name
        has_audio {bool} -- does the input have audio?
        codec {str} -- video codec
        encoder {str} -- video encoder
        audio_encoder {str} -- audio encoder
        target_fps {int} -- target FPS
        target_height {int} -- height
        target_rate {int} -- target bitrate in kbps
        target_rate_audio {int} -- audio target bitrate
        pass_file {str} -- path to temp pass file
        pass_number {int} -- current pass number (1 or 2)
        enc_type {str} -- encoding type (twopass or crf)
        chunk -- whether the output is a chunk of a larger encode
    Returns the full ffmpeg argv list.
    """
    target_fps = int(target_fps)
    # avoid Frame rate very high for a muxer not efficiently supporting it.
    if target_fps > 90:
        target_fps = 90
    base_cmd = [
        settings.FFMPEG_COMMAND,
        "-y",
        "-i",
        input_file,
        "-c:v",
        encoder,
        "-filter:v",
        "scale=-2:" + str(target_height) + ",fps=fps=" + str(target_fps),
        # always convert to 4:2:0 -- FIXME: this could be also 4:2:2
        # but compatibility will suffer
        "-pix_fmt",
        "yuv420p",
    ]
    if enc_type == "twopass":
        base_cmd.extend(["-b:v", str(target_rate) + "k"])
    elif enc_type == "crf":
        base_cmd.extend(["-crf", str(VIDEO_CRFS[codec])])
        # vp9 in CRF mode still wants a target bitrate as an upper hint
        if encoder == "libvpx-vp9":
            base_cmd.extend(["-b:v", str(target_rate) + "k"])
    if has_audio:
        base_cmd.extend(
            [
                "-c:a",
                audio_encoder,
                "-b:a",
                str(target_rate_audio) + "k",
                # stereo audio only, see https://trac.ffmpeg.org/ticket/5718
                "-ac",
                "2",
            ]
        )
    # get keyframe distance in frames
    keyframe_distance = int(target_fps * KEYFRAME_DISTANCE)
    # start building the command
    cmd = base_cmd[:]
    # preset settings
    if encoder == "libvpx-vp9":
        # first pass can run at max speed; quality only matters on pass 2
        if pass_number == 1:
            speed = 4
        else:
            speed = VP9_SPEED
    elif encoder in ["libx264"]:
        preset = X26x_PRESET
    elif encoder in ["libx265"]:
        preset = X265_PRESET
    # large frames are encoded with a faster preset
    if target_height >= 720:
        preset = X26x_PRESET_BIG_HEIGHT
    if encoder == "libx264":
        # H.264 level depends on the output resolution
        level = "4.2" if target_height <= 1080 else "5.2"
        x264_params = [
            "keyint=" + str(keyframe_distance * 2),
            "keyint_min=" + str(keyframe_distance),
        ]
        cmd.extend(
            [
                "-maxrate",
                str(int(int(target_rate) * MAX_RATE_MULTIPLIER)) + "k",
                "-bufsize",
                str(int(int(target_rate) * BUF_SIZE_MULTIPLIER)) + "k",
                "-force_key_frames",
                "expr:gte(t,n_forced*" + str(KEYFRAME_DISTANCE) + ")",
                "-x264-params",
                ":".join(x264_params),
                "-preset",
                preset,
                "-profile:v",
                VIDEO_PROFILES[codec],
                "-level",
                level,
            ]
        )
        if enc_type == "twopass":
            cmd.extend(["-passlogfile", pass_file, "-pass", pass_number])
    elif encoder == "libx265":
        x265_params = [
            "vbv-maxrate=" + str(int(int(target_rate) * MAX_RATE_MULTIPLIER)),
            "vbv-bufsize=" + str(int(int(target_rate) * BUF_SIZE_MULTIPLIER)),
            "keyint=" + str(keyframe_distance * 2),
            "keyint_min=" + str(keyframe_distance),
        ]
        # x265 takes its two-pass options through -x265-params
        if enc_type == "twopass":
            x265_params.extend(["stats=" + str(pass_file), "pass=" + str(pass_number)])
        cmd.extend(
            [
                "-force_key_frames",
                "expr:gte(t,n_forced*" + str(KEYFRAME_DISTANCE) + ")",
                "-x265-params",
                ":".join(x265_params),
                "-preset",
                preset,
                "-profile:v",
                VIDEO_PROFILES[codec],
            ]
        )
    elif encoder == "libvpx-vp9":
        cmd.extend(
            [
                "-g",
                str(keyframe_distance),
                "-keyint_min",
                str(keyframe_distance),
                "-maxrate",
                str(int(int(target_rate) * MAX_RATE_MULTIPLIER)) + "k",
                "-bufsize",
                str(int(int(target_rate) * BUF_SIZE_MULTIPLIER)) + "k",
                "-speed",
                speed,
                # '-deadline', 'realtime',
            ]
        )
        if enc_type == "twopass":
            cmd.extend(["-passlogfile", pass_file, "-pass", pass_number])
        cmd.extend(
            [
                "-strict",
                "-2",
            ]
        )
    # end of the command
    if pass_number == 1:
        # first pass only gathers stats; discard the output
        cmd.extend(["-an", "-f", "null", "/dev/null"])
    elif pass_number == 2:
        if output_file.endswith("mp4") and chunk:
            cmd.extend(["-movflags", "+faststart"])
        cmd.extend([output_file])
    return cmd
def produce_ffmpeg_commands(
    media_file, media_info, resolution, codec, output_filename, pass_file, chunk=False
):
    """Build the list of ffmpeg commands (one per pass) that encode
    `media_file` to `resolution`/`codec`, or False when the combination
    is invalid (unknown codec, no bitrate entry, upscaling request).
    """
    try:
        media_info = json.loads(media_info)
    except BaseException:
        media_info = {}
    # NOTE(review): `ext` is currently unused below
    if codec == "h264":
        encoder = "libx264"
        ext = "mp4"
    elif codec in ["h265", "hevc"]:
        encoder = "libx265"
        ext = "mp4"
    elif codec == "vp9":
        encoder = "libvpx-vp9"
        ext = "webm"
    else:
        return False
    # pick the bitrate table by source frame rate (25 vs 60 fps bucket)
    src_framerate = media_info.get("video_frame_rate", 30)
    if src_framerate <= 30:
        target_rate = VIDEO_BITRATES[codec][25].get(resolution)
    else:
        target_rate = VIDEO_BITRATES[codec][60].get(resolution)
        if not target_rate:  # INVESTIGATE MORE!
            target_rate = VIDEO_BITRATES[codec][25].get(resolution)
    if not target_rate:
        return False
    # never upscale, except for the two smallest output resolutions
    if media_info.get("video_height") < resolution:
        if resolution not in [240, 360]:  # always get these two
            return False
    # if codec == "h264_baseline":
    #    target_fps = 25
    # else:
    # adjust the target frame rate if the input is fractional
    target_fps = (
        src_framerate if isinstance(src_framerate, int) else math.ceil(src_framerate)
    )
    # long videos get CRF encoding, short ones two-pass (see constants)
    if media_info.get("video_duration") > CRF_ENCODING_NUM_SECONDS:
        enc_type = "crf"
    else:
        enc_type = "twopass"
    if enc_type == "twopass":
        passes = [1, 2]
    elif enc_type == "crf":
        passes = [2]
    cmds = []
    for pass_number in passes:
        cmds.append(
            get_base_ffmpeg_command(
                media_file,
                output_file=output_filename,
                has_audio=media_info.get("has_audio"),
                codec=codec,
                encoder=encoder,
                audio_encoder=AUDIO_ENCODERS[codec],
                target_fps=target_fps,
                target_height=resolution,
                target_rate=target_rate,
                target_rate_audio=AUDIO_BITRATES[codec],
                pass_file=pass_file,
                pass_number=pass_number,
                enc_type=enc_type,
                chunk=chunk,
            )
        )
    return cmds
def clean_query(query):
    """This is used to clear text in order to comply with SearchQuery
    known exception cases
    :param query: str - the query text that we want to clean
    :return: the cleaned, lowercased query ("" for falsy input)
    """
    if not query:
        return ""
    unwanted = ["^", "{", "}", "&", "|", "<", ">", '"', ")", "(", "!", ":", ";", "'", "#"]
    # one C-level pass instead of chained .replace() calls
    query = query.translate(str.maketrans("", "", "".join(unwanted)))
    return query.lower()

195
files/management_views.py Normal file
View File

@@ -0,0 +1,195 @@
from rest_framework.views import APIView
from rest_framework.parsers import JSONParser
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework import status
from users.models import User
from users.serializers import UserSerializer
from .permissions import IsMediacmsEditor
from .models import Media, Comment
from .methods import is_mediacms_manager
from .serializers import MediaSerializer, CommentSerializer
class MediaList(APIView):
    """Media listings
    Used on management pages of MediaCMS
    Should be available only to MediaCMS editors,
    managers and admins
    """

    permission_classes = (IsMediacmsEditor,)
    parser_classes = (JSONParser,)

    def get(self, request, format=None):
        """List media, filtered/sorted by query params; paginated."""
        params = self.request.query_params
        ordering = params.get("ordering", "").strip()
        sort_by = params.get("sort_by", "").strip()
        state = params.get("state", "").strip()
        encoding_status = params.get("encoding_status", "").strip()
        media_type = params.get("media_type", "").strip()
        featured = params.get("featured", "").strip()
        is_reviewed = params.get("is_reviewed", "").strip()
        sort_by_options = [
            "title",
            "add_date",
            "edit_date",
            "views",
            "likes",
            "reported_times",
        ]
        # any unrecognized parameter falls back to a safe default
        if sort_by not in sort_by_options:
            sort_by = "add_date"
        if ordering == "asc":
            ordering = ""
        else:
            ordering = "-"
        if media_type not in ["video", "image", "audio", "pdf"]:
            media_type = None
        if state not in ["private", "public", "unlisted"]:
            state = None
        if encoding_status not in ["pending", "running", "fail", "success"]:
            encoding_status = None
        # tri-state filters: true / false / "all" (no filtering)
        if featured == "true":
            featured = True
        elif featured == "false":
            featured = False
        else:
            featured = "all"
        if is_reviewed == "true":
            is_reviewed = True
        elif is_reviewed == "false":
            is_reviewed = False
        else:
            is_reviewed = "all"
        pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
        qs = Media.objects.filter()
        if state:
            qs = qs.filter(state=state)
        if encoding_status:
            qs = qs.filter(encoding_status=encoding_status)
        if media_type:
            qs = qs.filter(media_type=media_type)
        if featured != "all":
            qs = qs.filter(featured=featured)
        if is_reviewed != "all":
            qs = qs.filter(is_reviewed=is_reviewed)
        media = qs.order_by(f"{ordering}{sort_by}")
        paginator = pagination_class()
        page = paginator.paginate_queryset(media, request)
        serializer = MediaSerializer(page, many=True, context={"request": request})
        return paginator.get_paginated_response(serializer.data)

    def delete(self, request, format=None):
        # bulk delete by comma separated friendly tokens
        tokens = request.GET.get("tokens")
        if tokens:
            tokens = tokens.split(",")
            Media.objects.filter(friendly_token__in=tokens).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class CommentList(APIView):
    """Comments listings
    Used on management pages of MediaCMS
    Should be available only to MediaCMS editors,
    managers and admins
    """

    permission_classes = (IsMediacmsEditor,)
    parser_classes = (JSONParser,)

    def get(self, request, format=None):
        """List comments, sortable by text or add_date; paginated."""
        params = self.request.query_params
        ordering = params.get("ordering", "").strip()
        sort_by = params.get("sort_by", "").strip()
        sort_by_options = ["text", "add_date"]
        if sort_by not in sort_by_options:
            sort_by = "add_date"
        if ordering == "asc":
            ordering = ""
        else:
            ordering = "-"
        pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
        qs = Comment.objects.filter()
        media = qs.order_by(f"{ordering}{sort_by}")
        paginator = pagination_class()
        page = paginator.paginate_queryset(media, request)
        serializer = CommentSerializer(page, many=True, context={"request": request})
        return paginator.get_paginated_response(serializer.data)

    def delete(self, request, format=None):
        # bulk delete by comma separated comment uids
        comment_ids = request.GET.get('comment_ids')
        if comment_ids:
            comments = comment_ids.split(',')
            Comment.objects.filter(uid__in=comments).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class UserList(APIView):
    """Users listings
    Used on management pages of MediaCMS
    Should be available only to MediaCMS editors,
    managers and admins. Delete should be option
    for managers+admins only.
    """

    permission_classes = (IsMediacmsEditor,)
    parser_classes = (JSONParser,)

    def get(self, request, format=None):
        """List users, sortable by date_added or name; paginated."""
        params = self.request.query_params
        ordering = params.get("ordering", "").strip()
        sort_by = params.get("sort_by", "").strip()
        sort_by_options = ["date_added", "name"]
        if sort_by not in sort_by_options:
            sort_by = "date_added"
        if ordering == "asc":
            ordering = ""
        else:
            ordering = "-"
        pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
        qs = User.objects.filter()
        media = qs.order_by(f"{ordering}{sort_by}")
        paginator = pagination_class()
        page = paginator.paginate_queryset(media, request)
        serializer = UserSerializer(page, many=True, context={"request": request})
        return paginator.get_paginated_response(serializer.data)

    def delete(self, request, format=None):
        # deleting users is restricted to managers and admins
        if not is_mediacms_manager(request.user):
            return Response(
                {"detail": "bad permissions"}, status=status.HTTP_400_BAD_REQUEST
            )
        # bulk delete by comma separated usernames
        tokens = request.GET.get("tokens")
        if tokens:
            tokens = tokens.split(",")
            User.objects.filter(username__in=tokens).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

437
files/methods.py Normal file
View File

@@ -0,0 +1,437 @@
# Kudos to Werner Robitza, AVEQ GmbH, for helping with ffmpeg
# related content
import logging
import random
import itertools
from datetime import datetime
from cms import celery_app
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
from django.core.mail import EmailMessage
from . import models
from .helpers import mask_ip
logger = logging.getLogger(__name__)
def get_user_or_session(request):
    """Return a dictionary with user info
    whether user is authenticated or not
    this is used in action calculations, example for
    increasing the watch counter of a media
    """
    ret = {}
    if request.user.is_authenticated:
        ret["user_id"] = request.user.id
    else:
        # anonymous users are tracked by session key; create one if missing
        if not request.session.session_key:
            request.session.save()
        ret["user_session"] = request.session.session_key
    # record the remote IP (optionally md5-masked) for anonymous rate limiting
    if settings.MASK_IPS_FOR_ACTIONS:
        ret["remote_ip_addr"] = mask_ip(request.META.get("REMOTE_ADDR"))
    else:
        ret["remote_ip_addr"] = request.META.get("REMOTE_ADDR")
    return ret
def pre_save_action(media, user, session_key, action, remote_ip):
    """This will perform some checks,
    example threshold checks, before performing an action.
    Returns True when the action may proceed, False otherwise.
    """
    # imported here to avoid a circular import at module load time
    from actions.models import MediaAction

    # look up this user's (or session's) previous identical actions
    if user:
        query = MediaAction.objects.filter(media=media, action=action, user=user)
    else:
        query = MediaAction.objects.filter(
            media=media, action=action, session_key=session_key
        )
    query = query.order_by("-action_date")
    if query:
        query = query.first()
        if action in ["like", "dislike", "report"]:
            return False  # has already done action once
        elif action == "watch" and user:
            # increase the number of times a media is viewed
            # only after a full media duration has elapsed
            if media.duration:
                now = datetime.now(query.action_date.tzinfo)
                if (now - query.action_date).seconds > media.duration:
                    return True
    else:
        if user:  # first time action
            return True
    if not user:
        # perform some checking for requests where no session
        # id is specified (and user is anonymous) to avoid spam
        # eg allow for the same remote_ip for a specific number of actions
        query = (
            MediaAction.objects.filter(media=media, action=action, remote_ip=remote_ip)
            .filter(user=None)
            .order_by("-action_date")
        )
        if query:
            query = query.first()
            now = datetime.now(query.action_date.tzinfo)
            if action == "watch":
                if not (now - query.action_date).seconds > media.duration:
                    return False
            if (now - query.action_date).seconds > settings.TIME_TO_ACTION_ANONYMOUS:
                return True
        else:
            return True
    return False
def is_mediacms_editor(user):
    """Return True when *user* has MediaCMS editor rights.

    Editors are superusers, managers or editors. Anonymous users lack the
    custom role attributes, so a missing attribute simply means "not an
    editor". Catching AttributeError (instead of the original bare
    BaseException) avoids swallowing KeyboardInterrupt/SystemExit.
    """
    try:
        return bool(user.is_superuser or user.is_manager or user.is_editor)
    except AttributeError:
        return False
def is_mediacms_manager(user):
    """Return True when *user* has MediaCMS manager rights.

    Managers are superusers or managers. Anonymous users lack the custom
    role attributes, so a missing attribute means "not a manager".
    Catching AttributeError (instead of the original bare BaseException)
    avoids swallowing KeyboardInterrupt/SystemExit.
    """
    try:
        return bool(user.is_superuser or user.is_manager)
    except AttributeError:
        return False
def get_next_state(user, current_state, next_state):
    """Validate a requested media-state transition for a given user.

    Users may themselves perform only allowed transitions; editors may
    perform any. Invalid requested states fall back to the portal-wide
    default (settings.PORTAL_WORKFLOW).
    """
    if next_state not in ("public", "private", "unlisted"):
        # unknown value: fall back to the portal default state
        next_state = settings.PORTAL_WORKFLOW
    if is_mediacms_editor(user):
        # editors may perform any transition
        return next_state
    workflow = settings.PORTAL_WORKFLOW
    if workflow == "private":
        # on a private portal, ordinary users can only keep media private
        next_state = "private"
    elif workflow == "unlisted" and next_state == "public":
        # don't allow ordinary users to make media public in this case
        next_state = current_state
    return next_state
def notify_users(friendly_token=None, action=None, extra=None):
    """Notify users through email, for a set of actions.

    Supported actions:
      - "media_reported": email the admin list (if enabled in
        settings.ADMINS_NOTIFICATIONS); *extra* carries the report reason.
      - "media_added": email the admin list and/or the uploader, depending
        on ADMINS_NOTIFICATIONS / USERS_NOTIFICATIONS settings.

    Returns False when *friendly_token* is given but no such media exists,
    True otherwise. Mail is sent with fail_silently, so delivery failures
    are ignored.
    """
    notify_items = []
    media = None
    if friendly_token:
        media = models.Media.objects.filter(friendly_token=friendly_token).first()
        if not media:
            return False
        media_url = settings.SSL_FRONTEND_HOST + media.get_absolute_url()
    if action == "media_reported" and media:
        if settings.ADMINS_NOTIFICATIONS.get("MEDIA_REPORTED", False):
            title = "[{}] - Media was reported".format(settings.PORTAL_NAME)
            msg = """
        Media %s was reported.
        Reason: %s\n
        Total times this media has been reported: %s
        """ % (
                media_url,
                extra,
                media.reported_times,
            )
            # queue the admin notification
            d = {}
            d["title"] = title
            d["msg"] = msg
            d["to"] = settings.ADMIN_EMAIL_LIST
            notify_items.append(d)
    if action == "media_added" and media:
        if settings.ADMINS_NOTIFICATIONS.get("MEDIA_ADDED", False):
            title = "[{}] - Media was added".format(settings.PORTAL_NAME)
            msg = """
        Media %s was added by user %s.
        """ % (
                media_url,
                media.user,
            )
            # queue the admin notification
            d = {}
            d["title"] = title
            d["msg"] = msg
            d["to"] = settings.ADMIN_EMAIL_LIST
            notify_items.append(d)
        if settings.USERS_NOTIFICATIONS.get("MEDIA_ADDED", False):
            title = "[{}] - Your media was added".format(settings.PORTAL_NAME)
            msg = """
        Your media has been added! It will be encoded and will be available soon.
        URL: %s
        """ % (
                media_url
            )
            # queue the uploader notification
            d = {}
            d["title"] = title
            d["msg"] = msg
            d["to"] = [media.user.email]
            notify_items.append(d)
    # send all queued notifications; failures are deliberately ignored
    for item in notify_items:
        email = EmailMessage(
            item["title"], item["msg"], settings.DEFAULT_FROM_EMAIL, item["to"]
        )
        email.send(fail_silently=True)
    return True
def show_recommended_media(request, limit=100):
    """Return a shuffled list of recommended media, used on the index page.

    Prefers the cached list of popular media ids (produced by the
    get_list_of_popular_media task); falls back to the most viewed/liked
    listable media otherwise.
    """
    listable = Q(listable=True)
    popular_ids = cache.get("popular_media_ids")
    # produced by task get_list_of_popular_media and cached
    if popular_ids:
        queryset = models.Media.objects.filter(friendly_token__in=popular_ids).filter(
            listable
        )
    else:
        queryset = models.Media.objects.filter(listable).order_by("-views", "-likes")
    recommended = list(queryset.prefetch_related("user")[:limit])
    random.shuffle(recommended)
    return recommended
def show_related_media(media, request=None, limit=100):
    """Return a list of related media, dispatching on the configured strategy.

    settings.RELATED_MEDIA_STRATEGY selects between "calculated",
    "author", and the default content-based strategy.
    """
    strategy = settings.RELATED_MEDIA_STRATEGY
    if strategy == "calculated":
        return show_related_media_calculated(media, request, limit)
    if strategy == "author":
        return show_related_media_author(media, request, limit)
    return show_related_media_content(media, request, limit)
def show_related_media_content(media, request, limit):
    """Return a list of related media based on simple calculations.

    Builds the result in layers: first the author's own listable media,
    then (if still short of *limit*) media from the same category, then
    generic listable media. The final list is deduplicated, stripped of
    the media itself, and shuffled.
    """
    # Create list with author items
    # then items on same category, then some random(latest)
    # Aim is to always show enough (limit) videos
    # and include author videos in any case
    q_author = Q(listable=True, user=media.user)
    m = list(
        models.Media.objects.filter(q_author)
        .order_by()
        .prefetch_related("user")[:limit]
    )
    # order by random criteria so that it doesn't bring the same results
    # attention: only fields that are indexed make sense here! also need
    # find a way for indexes with more than 1 field
    order_criteria = [
        "-views",
        "views",
        "add_date",
        "-add_date",
        "featured",
        "-featured",
        "user_featured",
        "-user_featured",
    ]
    # TODO: MAke this mess more readable, and add TAGS support - aka related
    # tags rather than random media
    if len(m) < limit:
        category = media.category.first()
        if category:
            q_category = Q(listable=True, category=category)
            # NOTE(review): limit - media.user.media_count can be negative
            # when a prolific author already fills the limit; a negative
            # slice bound selects from the end of the queryset — confirm
            # this is intended.
            q_res = (
                models.Media.objects.filter(q_category)
                .order_by(order_criteria[random.randint(0, len(order_criteria) - 1)])
                .prefetch_related("user")[: limit - media.user.media_count]
            )
            m = list(itertools.chain(m, q_res))
    if len(m) < limit:
        q_generic = Q(listable=True)
        # NOTE(review): same potentially-negative slice bound as above.
        q_res = (
            models.Media.objects.filter(q_generic)
            .order_by(order_criteria[random.randint(0, len(order_criteria) - 1)])
            .prefetch_related("user")[: limit - media.user.media_count]
        )
        m = list(itertools.chain(m, q_res))
    m = list(set(m[:limit]))  # remove duplicates
    try:
        m.remove(media)  # remove media from results
    except ValueError:
        pass
    random.shuffle(m)
    return m
def show_related_media_author(media, request, limit):
    """Return a list of related media from the same author."""
    author_q = Q(listable=True, user=media.user)
    related = list(
        models.Media.objects.filter(author_q)
        .order_by()
        .prefetch_related("user")[:limit]
    )
    # order by random criteria so that it doesn't bring the same results
    # attention: only fields that are indexed make sense here! also need
    # find a way for indexes with more than 1 field
    related = list(set(related[:limit]))  # remove duplicates
    # drop the media itself if it appears among its author's other media
    try:
        related.remove(media)
    except ValueError:
        pass
    random.shuffle(related)
    return related
def show_related_media_calculated(media, request, limit):
    """Return related media based on ML recommendations.

    Not implemented yet (a big todo!) — always returns an empty list.
    """
    return []
def update_user_ratings(user, media, user_ratings):
    """Populate *user_ratings* entries with this user's own scores for a media.

    Each entry is expected to carry a "category_id" key; when the user has
    rated that category, the entry's "score" is overwritten in place.
    Returns the (mutated) user_ratings list.
    """
    for entry in user_ratings:
        own_rating = (
            models.Rating.objects.filter(
                user=user, media_id=media, rating_category_id=entry.get("category_id")
            )
            .only("score")
            .first()
        )
        if own_rating:
            entry["score"] = own_rating.score
    return user_ratings
def notify_user_on_comment(friendly_token):
    """Email the owner of a media when a comment is added to it.

    Respects the owner's ``notification_on_comments`` preference and sends
    with fail_silently, so delivery failures are ignored.

    Returns False when no media matches *friendly_token*, True otherwise.
    """
    # (removed the dead `media = None` assignment that was immediately
    # overwritten in the original)
    media = models.Media.objects.filter(friendly_token=friendly_token).first()
    if not media:
        return False
    user = media.user
    media_url = settings.SSL_FRONTEND_HOST + media.get_absolute_url()
    if user.notification_on_comments:
        title = "[{}] - A comment was added".format(settings.PORTAL_NAME)
        msg = """
        A comment has been added to your media %s .
        View it on %s
        """ % (
            media.title,
            media_url,
        )
        email = EmailMessage(
            title, msg, settings.DEFAULT_FROM_EMAIL, [user.email]
        )
        email.send(fail_silently=True)
    return True
def list_tasks():
    """Lists celery tasks.

    To be used in an admin dashboard. Inspects active/reserved/scheduled
    tasks across workers and returns a dict keyed by state, plus a flat
    list of task ids and, for encode_media tasks, (friendly_token,
    profile_id) pairs with encoding progress info.
    """
    i = celery_app.control.inspect([])
    ret = {}
    temp = {}
    task_ids = []
    media_profile_pairs = []
    # snapshot the three task states reported by the celery inspector
    temp["active"] = i.active()
    temp["reserved"] = i.reserved()
    temp["scheduled"] = i.scheduled()
    for state, state_dict in temp.items():
        ret[state] = {}
        ret[state]["tasks"] = []
        # state_dict maps worker name -> list of task descriptions
        for worker, worker_dict in state_dict.items():
            for task in worker_dict:
                task_dict = {}
                task_dict["worker"] = worker
                task_dict["task_id"] = task.get("id")
                task_ids.append(task.get("id"))
                task_dict["args"] = task.get("args")
                task_dict["name"] = task.get("name")
                task_dict["time_start"] = task.get("time_start")
                if task.get("name") == "encode_media":
                    # args arrive as a stringified tuple, e.g. "('token', 3)";
                    # strip tuple punctuation then split into its two parts.
                    # NOTE(review): assumes encode_media always receives
                    # (friendly_token, profile_id) first — confirm against
                    # the task signature.
                    task_args = task.get("args")
                    for bad in "(),'":
                        task_args = task_args.replace(bad, "")
                    friendly_token = task_args.split()[0]
                    profile_id = task_args.split()[1]
                    media = models.Media.objects.filter(
                        friendly_token=friendly_token
                    ).first()
                    if media:
                        profile = models.EncodeProfile.objects.filter(
                            id=profile_id
                        ).first()
                        if profile:
                            media_profile_pairs.append(
                                (media.friendly_token, profile.id)
                            )
                            task_dict["info"] = {}
                            task_dict["info"]["profile name"] = profile.name
                            task_dict["info"]["media title"] = media.title
                            # attach live encoding progress when available
                            encoding = models.Encoding.objects.filter(
                                task_id=task.get("id")
                            ).first()
                            if encoding:
                                task_dict["info"][
                                    "encoding progress"
                                ] = encoding.progress
                ret[state]["tasks"].append(task_dict)
    ret["task_ids"] = task_ids
    ret["media_profile_pairs"] = media_profile_pairs
    return ret

View File

@@ -0,0 +1,637 @@
# Generated by Django 3.1.4 on 2020-12-01 07:12
import django.contrib.postgres.search
from django.db import migrations, models
import files.models
import imagekit.models.fields
import uuid
class Migration(migrations.Migration):
    """Initial schema for the ``files`` app (auto-generated by Django 3.1.4).

    Creates the standalone tables: Category, Comment, EncodeProfile,
    Encoding, Language, License, Media, Playlist, PlaylistMedia, Rating,
    RatingCategory, Subtitle and Tag. Foreign keys and indexes between
    them are added by the follow-up migration ``0002``.
    Do not edit by hand except to squash.
    """
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Category",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("uid", models.UUIDField(default=uuid.uuid4, unique=True)),
                ("add_date", models.DateTimeField(auto_now_add=True)),
                ("title", models.CharField(db_index=True, max_length=100, unique=True)),
                ("description", models.TextField(blank=True)),
                (
                    "is_global",
                    models.BooleanField(
                        default=False, help_text="global categories or user specific"
                    ),
                ),
                (
                    "media_count",
                    models.IntegerField(default=0, help_text="number of media"),
                ),
                (
                    "thumbnail",
                    imagekit.models.fields.ProcessedImageField(
                        blank=True, upload_to=files.models.category_thumb_path
                    ),
                ),
                (
                    "listings_thumbnail",
                    models.CharField(
                        blank=True,
                        help_text="Thumbnail to show on listings",
                        max_length=400,
                        null=True,
                    ),
                ),
            ],
            options={
                "verbose_name_plural": "Categories",
                "ordering": ["title"],
            },
        ),
        migrations.CreateModel(
            name="Comment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("add_date", models.DateTimeField(auto_now_add=True)),
                ("text", models.TextField(help_text="text")),
                ("uid", models.UUIDField(default=uuid.uuid4, unique=True)),
                ("lft", models.PositiveIntegerField(editable=False)),
                ("rght", models.PositiveIntegerField(editable=False)),
                ("tree_id", models.PositiveIntegerField(db_index=True, editable=False)),
                ("level", models.PositiveIntegerField(editable=False)),
            ],
            options={
                "abstract": False,
            },
        ),
        migrations.CreateModel(
            name="EncodeProfile",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=90)),
                (
                    "extension",
                    models.CharField(
                        choices=[("mp4", "mp4"), ("webm", "webm"), ("gif", "gif")],
                        max_length=10,
                    ),
                ),
                (
                    "resolution",
                    models.IntegerField(
                        blank=True,
                        choices=[
                            (2160, "2160"),
                            (1440, "1440"),
                            (1080, "1080"),
                            (720, "720"),
                            (480, "480"),
                            (360, "360"),
                            (240, "240"),
                        ],
                        null=True,
                    ),
                ),
                (
                    "codec",
                    models.CharField(
                        blank=True,
                        choices=[("h265", "h265"), ("h264", "h264"), ("vp9", "vp9")],
                        max_length=10,
                        null=True,
                    ),
                ),
                ("description", models.TextField(blank=True, help_text="description")),
                ("active", models.BooleanField(default=True)),
            ],
            options={
                "ordering": ["resolution"],
            },
        ),
        migrations.CreateModel(
            name="Encoding",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("add_date", models.DateTimeField(auto_now_add=True)),
                ("commands", models.TextField(blank=True, help_text="commands run")),
                (
                    "chunk",
                    models.BooleanField(
                        db_index=True, default=False, help_text="is chunk?"
                    ),
                ),
                ("chunk_file_path", models.CharField(blank=True, max_length=400)),
                ("chunks_info", models.TextField(blank=True)),
                ("logs", models.TextField(blank=True)),
                ("md5sum", models.CharField(blank=True, max_length=50, null=True)),
                (
                    "media_file",
                    models.FileField(
                        blank=True,
                        max_length=500,
                        upload_to=files.models.encoding_media_file_path,
                        verbose_name="encoding file",
                    ),
                ),
                ("progress", models.PositiveSmallIntegerField(default=0)),
                ("update_date", models.DateTimeField(auto_now=True)),
                ("retries", models.IntegerField(default=0)),
                ("size", models.CharField(blank=True, max_length=20)),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("pending", "Pending"),
                            ("running", "Running"),
                            ("fail", "Fail"),
                            ("success", "Success"),
                        ],
                        default="pending",
                        max_length=20,
                    ),
                ),
                ("temp_file", models.CharField(blank=True, max_length=400)),
                ("task_id", models.CharField(blank=True, max_length=100)),
                ("total_run_time", models.IntegerField(default=0)),
                ("worker", models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name="Language",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("code", models.CharField(help_text="language code", max_length=12)),
                ("title", models.CharField(help_text="language code", max_length=100)),
            ],
            options={
                "ordering": ["id"],
            },
        ),
        migrations.CreateModel(
            name="License",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(max_length=100, unique=True)),
                ("description", models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name="Media",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "add_date",
                    models.DateTimeField(
                        blank=True,
                        db_index=True,
                        null=True,
                        verbose_name="Date produced",
                    ),
                ),
                (
                    "allow_download",
                    models.BooleanField(
                        default=True,
                        help_text="Whether option to download media is shown",
                    ),
                ),
                ("description", models.TextField(blank=True)),
                ("dislikes", models.IntegerField(default=0)),
                ("duration", models.IntegerField(default=0)),
                ("edit_date", models.DateTimeField(auto_now=True)),
                (
                    "enable_comments",
                    models.BooleanField(
                        default=True,
                        help_text="Whether comments will be allowed for this media",
                    ),
                ),
                (
                    "encoding_status",
                    models.CharField(
                        choices=[
                            ("pending", "Pending"),
                            ("running", "Running"),
                            ("fail", "Fail"),
                            ("success", "Success"),
                        ],
                        db_index=True,
                        default="pending",
                        max_length=20,
                    ),
                ),
                (
                    "featured",
                    models.BooleanField(
                        db_index=True,
                        default=False,
                        help_text="Whether media is globally featured by a MediaCMS editor",
                    ),
                ),
                (
                    "friendly_token",
                    models.CharField(
                        blank=True,
                        db_index=True,
                        help_text="Identifier for the Media",
                        max_length=12,
                    ),
                ),
                (
                    "hls_file",
                    models.CharField(
                        blank=True,
                        help_text="Path to HLS file for videos",
                        max_length=1000,
                    ),
                ),
                (
                    "is_reviewed",
                    models.BooleanField(
                        db_index=True,
                        default=True,
                        help_text="Whether media is reviewed, so it can appear on public listings",
                    ),
                ),
                ("likes", models.IntegerField(db_index=True, default=1)),
                (
                    "listable",
                    models.BooleanField(
                        default=False, help_text="Whether it will appear on listings"
                    ),
                ),
                (
                    "md5sum",
                    models.CharField(
                        blank=True,
                        help_text="Not exposed, used internally",
                        max_length=50,
                        null=True,
                    ),
                ),
                (
                    "media_file",
                    models.FileField(
                        help_text="media file",
                        max_length=500,
                        upload_to=files.models.original_media_file_path,
                        verbose_name="media file",
                    ),
                ),
                (
                    "media_info",
                    models.TextField(
                        blank=True, help_text="extracted media metadata info"
                    ),
                ),
                (
                    "media_type",
                    models.CharField(
                        blank=True,
                        choices=[
                            ("video", "Video"),
                            ("image", "Image"),
                            ("pdf", "Pdf"),
                            ("audio", "Audio"),
                        ],
                        db_index=True,
                        default="video",
                        max_length=20,
                    ),
                ),
                (
                    "password",
                    models.CharField(
                        blank=True,
                        help_text="password for private media",
                        max_length=100,
                    ),
                ),
                (
                    "preview_file_path",
                    models.CharField(
                        blank=True,
                        help_text="preview gif for videos, path in filesystem",
                        max_length=500,
                    ),
                ),
                (
                    "poster",
                    imagekit.models.fields.ProcessedImageField(
                        blank=True,
                        help_text="media extracted big thumbnail, shown on media page",
                        max_length=500,
                        upload_to=files.models.original_thumbnail_file_path,
                    ),
                ),
                (
                    "reported_times",
                    models.IntegerField(
                        default=0, help_text="how many time a Medis is reported"
                    ),
                ),
                (
                    "search",
                    django.contrib.postgres.search.SearchVectorField(
                        help_text="used to store all searchable info and metadata for a Media",
                        null=True,
                    ),
                ),
                (
                    "size",
                    models.CharField(
                        blank=True,
                        help_text="media size in bytes, automatically calculated",
                        max_length=20,
                        null=True,
                    ),
                ),
                (
                    "sprites",
                    models.FileField(
                        blank=True,
                        help_text="sprites file, only for videos, displayed on the video player",
                        max_length=500,
                        upload_to=files.models.original_thumbnail_file_path,
                    ),
                ),
                (
                    "state",
                    models.CharField(
                        choices=[
                            ("private", "Private"),
                            ("public", "Public"),
                            ("unlisted", "Unlisted"),
                        ],
                        db_index=True,
                        default="public",
                        help_text="state of Media",
                        max_length=20,
                    ),
                ),
                (
                    "title",
                    models.CharField(
                        blank=True,
                        db_index=True,
                        help_text="media title",
                        max_length=100,
                    ),
                ),
                (
                    "thumbnail",
                    imagekit.models.fields.ProcessedImageField(
                        blank=True,
                        help_text="media extracted small thumbnail, shown on listings",
                        max_length=500,
                        upload_to=files.models.original_thumbnail_file_path,
                    ),
                ),
                (
                    "thumbnail_time",
                    models.FloatField(
                        blank=True,
                        help_text="Time on video that a thumbnail will be taken",
                        null=True,
                    ),
                ),
                (
                    "uid",
                    models.UUIDField(
                        default=uuid.uuid4,
                        help_text="A unique identifier for the Media",
                        unique=True,
                    ),
                ),
                (
                    "uploaded_thumbnail",
                    imagekit.models.fields.ProcessedImageField(
                        blank=True,
                        help_text="thumbnail from uploaded_poster field",
                        max_length=500,
                        upload_to=files.models.original_thumbnail_file_path,
                    ),
                ),
                (
                    "uploaded_poster",
                    imagekit.models.fields.ProcessedImageField(
                        blank=True,
                        help_text="This image will characterize the media",
                        max_length=500,
                        upload_to=files.models.original_thumbnail_file_path,
                        verbose_name="Upload image",
                    ),
                ),
                (
                    "user_featured",
                    models.BooleanField(
                        default=False, help_text="Featured by the user"
                    ),
                ),
                ("video_height", models.IntegerField(default=1)),
                ("views", models.IntegerField(db_index=True, default=1)),
            ],
            options={
                "ordering": ["-add_date"],
            },
        ),
        migrations.CreateModel(
            name="Playlist",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("add_date", models.DateTimeField(auto_now_add=True, db_index=True)),
                ("description", models.TextField(blank=True, help_text="description")),
                (
                    "friendly_token",
                    models.CharField(blank=True, db_index=True, max_length=12),
                ),
                ("title", models.CharField(db_index=True, max_length=100)),
                ("uid", models.UUIDField(default=uuid.uuid4, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name="PlaylistMedia",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("action_date", models.DateTimeField(auto_now=True)),
                ("ordering", models.IntegerField(default=1)),
            ],
            options={
                "ordering": ["ordering", "-action_date"],
            },
        ),
        migrations.CreateModel(
            name="Rating",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("add_date", models.DateTimeField(auto_now_add=True)),
                (
                    "score",
                    models.IntegerField(validators=[files.models.validate_rating]),
                ),
            ],
            options={
                "verbose_name_plural": "Ratings",
            },
        ),
        migrations.CreateModel(
            name="RatingCategory",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("description", models.TextField(blank=True)),
                ("enabled", models.BooleanField(default=True)),
                ("title", models.CharField(db_index=True, max_length=200, unique=True)),
            ],
            options={
                "verbose_name_plural": "Rating Categories",
            },
        ),
        migrations.CreateModel(
            name="Subtitle",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "subtitle_file",
                    models.FileField(
                        help_text="File has to be WebVTT format",
                        max_length=500,
                        upload_to=files.models.subtitles_file_path,
                        verbose_name="Subtitle/CC file",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Tag",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(db_index=True, max_length=100, unique=True)),
                (
                    "media_count",
                    models.IntegerField(default=0, help_text="number of media"),
                ),
                (
                    "listings_thumbnail",
                    models.CharField(
                        blank=True,
                        db_index=True,
                        help_text="Thumbnail to show on listings",
                        max_length=400,
                        null=True,
                    ),
                ),
            ],
            options={
                "ordering": ["title"],
            },
        ),
    ]

View File

@@ -0,0 +1,240 @@
# Generated by Django 3.1.4 on 2020-12-01 07:12
from django.conf import settings
import django.contrib.postgres.indexes
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Second half of the initial ``files`` schema (auto-generated).

    Adds all foreign keys and many-to-many relations between the models
    created in ``0001_initial`` (and to the user/channel models), plus
    composite indexes, a GIN index on Media.search, and the Rating
    unique-together constraint. Do not edit by hand except to squash.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("files", "0001_initial"),
        ("users", "0001_initial"),
    ]
    operations = [
        migrations.AddField(
            model_name="tag",
            name="user",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="subtitle",
            name="language",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="files.language"
            ),
        ),
        migrations.AddField(
            model_name="subtitle",
            name="media",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="subtitles",
                to="files.media",
            ),
        ),
        migrations.AddField(
            model_name="subtitle",
            name="user",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
            ),
        ),
        migrations.AddField(
            model_name="rating",
            name="media",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="ratings",
                to="files.media",
            ),
        ),
        migrations.AddField(
            model_name="rating",
            name="rating_category",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="files.ratingcategory"
            ),
        ),
        migrations.AddField(
            model_name="rating",
            name="user",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
            ),
        ),
        migrations.AddField(
            model_name="playlistmedia",
            name="media",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="files.media"
            ),
        ),
        migrations.AddField(
            model_name="playlistmedia",
            name="playlist",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="files.playlist"
            ),
        ),
        migrations.AddField(
            model_name="playlist",
            name="media",
            field=models.ManyToManyField(
                blank=True, through="files.PlaylistMedia", to="files.Media"
            ),
        ),
        migrations.AddField(
            model_name="playlist",
            name="user",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="playlists",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="media",
            name="category",
            field=models.ManyToManyField(
                blank=True,
                help_text="Media can be part of one or more categories",
                to="files.Category",
            ),
        ),
        migrations.AddField(
            model_name="media",
            name="channel",
            field=models.ForeignKey(
                blank=True,
                help_text="Media can exist in one or no Channels",
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="users.channel",
            ),
        ),
        migrations.AddField(
            model_name="media",
            name="license",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="files.license",
            ),
        ),
        migrations.AddField(
            model_name="media",
            name="rating_category",
            field=models.ManyToManyField(
                blank=True,
                help_text="Rating category, if media Rating is allowed",
                to="files.RatingCategory",
            ),
        ),
        migrations.AddField(
            model_name="media",
            name="tags",
            field=models.ManyToManyField(
                blank=True,
                help_text="select one or more out of the existing tags",
                to="files.Tag",
            ),
        ),
        migrations.AddField(
            model_name="media",
            name="user",
            field=models.ForeignKey(
                help_text="user that uploads the media",
                on_delete=django.db.models.deletion.CASCADE,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="encoding",
            name="media",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="encodings",
                to="files.media",
            ),
        ),
        migrations.AddField(
            model_name="encoding",
            name="profile",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="files.encodeprofile"
            ),
        ),
        migrations.AddField(
            model_name="comment",
            name="media",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="comments",
                to="files.media",
            ),
        ),
        migrations.AddField(
            model_name="comment",
            name="parent",
            field=mptt.fields.TreeForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="children",
                to="files.comment",
            ),
        ),
        migrations.AddField(
            model_name="comment",
            name="user",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
            ),
        ),
        migrations.AddField(
            model_name="category",
            name="user",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddIndex(
            model_name="rating",
            index=models.Index(
                fields=["user", "media"], name="files_ratin_user_id_72ca6a_idx"
            ),
        ),
        migrations.AlterUniqueTogether(
            name="rating",
            unique_together={("user", "media", "rating_category")},
        ),
        migrations.AddIndex(
            model_name="media",
            index=models.Index(
                fields=["state", "encoding_status", "is_reviewed"],
                name="files_media_state_666b93_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="media",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["search"], name="files_media_search_7194c6_gin"
            ),
        ),
    ]

View File

1714
files/models.py Normal file

File diff suppressed because it is too large Load Diff

9
files/permissions.py Normal file
View File

@@ -0,0 +1,9 @@
from rest_framework import permissions
from .methods import is_mediacms_editor
class IsMediacmsEditor(permissions.BasePermission):
    """DRF permission that allows access only to MediaCMS editors."""

    def has_permission(self, request, view):
        # Delegate the role check to the shared helper in files.methods.
        return bool(is_mediacms_editor(request.user))

257
files/serializers.py Normal file
View File

@@ -0,0 +1,257 @@
from rest_framework import serializers
from .models import Media, EncodeProfile, Playlist, Comment, Category, Tag
# TODO: put them in a more DRY way
class MediaSerializer(serializers.ModelSerializer):
    """List-style serializer for Media, used in APIs that show related media.

    All *_url / author_* method fields are absolutized against the
    current request (taken from serializer context).
    """

    user = serializers.ReadOnlyField(source="user.username")
    url = serializers.SerializerMethodField()
    api_url = serializers.SerializerMethodField()
    thumbnail_url = serializers.SerializerMethodField()
    author_profile = serializers.SerializerMethodField()
    author_thumbnail = serializers.SerializerMethodField()

    def get_url(self, obj):
        """Absolute URL of the media page."""
        return self.context["request"].build_absolute_uri(obj.get_absolute_url())

    def get_api_url(self, obj):
        """Absolute URL of the media API endpoint."""
        return self.context["request"].build_absolute_uri(
            obj.get_absolute_url(api=True)
        )

    def get_thumbnail_url(self, obj):
        """Absolute URL of the media thumbnail."""
        return self.context["request"].build_absolute_uri(obj.thumbnail_url)

    def get_author_profile(self, obj):
        """Absolute URL of the uploader's profile page."""
        return self.context["request"].build_absolute_uri(obj.author_profile())

    def get_author_thumbnail(self, obj):
        """Absolute URL of the uploader's avatar."""
        return self.context["request"].build_absolute_uri(obj.author_thumbnail())

    class Meta:
        model = Media
        # fix: the original tuples listed "views" twice in read_only_fields
        # and "url"/"api_url"/"views" twice in fields; duplicates removed,
        # order otherwise preserved.
        read_only_fields = (
            "friendly_token",
            "user",
            "add_date",
            "views",
            "media_type",
            "state",
            "duration",
            "encoding_status",
            "likes",
            "dislikes",
            "reported_times",
            "size",
            "is_reviewed",
        )
        fields = (
            "friendly_token",
            "url",
            "api_url",
            "user",
            "title",
            "description",
            "add_date",
            "views",
            "media_type",
            "state",
            "duration",
            "thumbnail_url",
            "is_reviewed",
            "preview_url",
            "author_name",
            "author_profile",
            "author_thumbnail",
            "encoding_status",
            "likes",
            "dislikes",
            "reported_times",
            "featured",
            "user_featured",
            "size",
        )
class SingleMediaSerializer(serializers.ModelSerializer):
    """Detail serializer for a single Media page, exposing the full set of
    playback, authorship and review metadata."""

    user = serializers.ReadOnlyField(source="user.username")
    url = serializers.SerializerMethodField()

    def get_url(self, obj):
        """Absolute URL of the media page."""
        return self.context["request"].build_absolute_uri(obj.get_absolute_url())

    class Meta:
        model = Media
        # fix: the original tuples listed "views" twice in read_only_fields
        # and "url" twice in fields; duplicates removed, order otherwise
        # preserved.
        read_only_fields = (
            "friendly_token",
            "user",
            "add_date",
            "views",
            "media_type",
            "state",
            "duration",
            "encoding_status",
            "likes",
            "dislikes",
            "reported_times",
            "size",
            "video_height",
            "is_reviewed",
        )
        fields = (
            "url",
            "user",
            "title",
            "description",
            "add_date",
            "edit_date",
            "media_type",
            "state",
            "duration",
            "thumbnail_url",
            "poster_url",
            "thumbnail_time",
            "sprites_url",
            "preview_url",
            "author_name",
            "author_profile",
            "author_thumbnail",
            "encodings_info",
            "encoding_status",
            "views",
            "likes",
            "dislikes",
            "reported_times",
            "user_featured",
            "original_media_url",
            "size",
            "video_height",
            "enable_comments",
            "categories_info",
            "is_reviewed",
            "edit_url",
            "tags_info",
            "hls_info",
            "license",
            "subtitles_info",
            "ratings_info",
            "add_subtitle_url",
            "allow_download",
        )
class MediaSearchSerializer(serializers.ModelSerializer):
    """Compact Media representation returned by search endpoints."""

    url = serializers.SerializerMethodField()

    def get_url(self, obj):
        """Absolute URL of the media page."""
        request = self.context["request"]
        return request.build_absolute_uri(obj.get_absolute_url())

    class Meta:
        model = Media
        fields = (
            "title",
            "author_name",
            "author_profile",
            "thumbnail_url",
            "add_date",
            "views",
            "description",
            "friendly_token",
            "duration",
            "url",
            "media_type",
            "preview_url",
            "categories_info",
        )
class EncodeProfileSerializer(serializers.ModelSerializer):
    """Read serializer exposing the public attributes of an EncodeProfile."""
    class Meta:
        model = EncodeProfile
        fields = ("name", "extension", "resolution", "codec", "description")
class CategorySerializer(serializers.ModelSerializer):
    """Read serializer for Category listings."""
    # username of the (optional) owner; categories may also be global
    user = serializers.ReadOnlyField(source="user.username")
    class Meta:
        model = Category
        fields = (
            "title",
            "description",
            "is_global",
            "media_count",
            "user",
            "thumbnail_url",
        )
class TagSerializer(serializers.ModelSerializer):
    """Read serializer for Tag listings."""
    class Meta:
        model = Tag
        fields = ("title", "media_count", "thumbnail_url")
class PlaylistSerializer(serializers.ModelSerializer):
    """List serializer for Playlists (summary view)."""
    # username of the playlist owner
    user = serializers.ReadOnlyField(source="user.username")
    class Meta:
        model = Playlist
        read_only_fields = ("add_date", "user")
        fields = (
            "add_date",
            "title",
            "description",
            "user",
            "media_count",
            "url",
            "api_url",
            "thumbnail_url"
        )
class PlaylistDetailSerializer(serializers.ModelSerializer):
    """Detail serializer for a single Playlist, including the owner's avatar."""
    # username of the playlist owner
    user = serializers.ReadOnlyField(source="user.username")
    class Meta:
        model = Playlist
        read_only_fields = ("add_date", "user")
        fields = (
            "title",
            "add_date",
            "user_thumbnail_url",
            "description",
            "user",
            "media_count",
            "url",
            "thumbnail_url"
        )
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for media comments, embedding author display info."""
    # display data for the comment author, read off the related user
    author_profile = serializers.ReadOnlyField(source="user.get_absolute_url")
    author_name = serializers.ReadOnlyField(source="user.name")
    author_thumbnail_url = serializers.ReadOnlyField(source="user.thumbnail_url")
    class Meta:
        model = Comment
        read_only_fields = ("add_date", "uid")
        fields = (
            "add_date",
            "text",
            "parent",
            "author_thumbnail_url",
            "author_profile",
            "author_name",
            "media_url",
            "uid",
        )

86
files/stop_words.py Normal file
View File

@@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
# English stop words, stored as a set of whitespace-separated tokens.
# Presumably used to strip noise words when building searchable media
# metadata — TODO confirm against the caller.
STOP_WORDS = set(
    """
a about above across after afterwards again against all almost alone along
already also although always am among amongst amount an and another any anyhow
anyone anything anyway anywhere are around as at
back be became because become becomes becoming been before beforehand behind
being below beside besides between beyond both bottom but by
call can cannot ca could
did do does doing done down due during
each eight either eleven else elsewhere empty enough even ever every
everyone everything everywhere except
few fifteen fifty first five for former formerly forty four from front full
further
get give go
had has have he hence her here hereafter hereby herein hereupon hers herself
him himself his how however hundred
i if in indeed into is it its itself
keep
last latter latterly least less
just
made make many may me meanwhile might mine more moreover most mostly move much
must my myself
name namely neither never nevertheless next nine no nobody none noone nor not
nothing now nowhere
of off often on once one only onto or other others otherwise our ours ourselves
out over own
part per perhaps please put
quite
rather re really regarding
same say see seem seemed seeming seems serious several she should show side
since six sixty so some somehow someone something sometime sometimes somewhere
still such
take ten than that the their them themselves then thence there thereafter
thereby therefore therein thereupon these they third this those though three
through throughout thru thus to together too top toward towards twelve twenty
two
under until up unless upon us used using
various very very via was we well were what whatever when whence whenever where
whereafter whereas whereby wherein whereupon wherever whether which while
whither who whoever whole whom whose why will with within without would
yet you your yours yourself yourselves
""".split()
)
# Spanish stop words, kept as one whitespace-separated blob and split into a
# set. The list deliberately contains both accented and unaccented spellings
# (eg "aqui" / "aquí"), presumably to also match unaccented user input —
# TODO confirm against the search normalization used elsewhere.
SPANISH_STOP_WORDS = set(
    """
a actualmente acuerdo adelante ademas además adrede afirmó agregó ahi ahora ahí al algo alguna algunas alguno algunos algún alli allí alrededor ambos ampleamos antano antaño ante anterior antes apenas aproximadamente aquel aquella aquellas aquello aquellos aqui aquél aquélla aquéllas aquéllos aquí arriba arribaabajo aseguró asi así atras aun aunque ayer añadió aún
b bajo bastante bien breve buen buena buenas bueno buenos
c cada casi cerca cierta ciertas cierto ciertos cinco claro comentó como con conmigo conocer conseguimos conseguir considera consideró consigo consigue consiguen consigues contigo contra cosas creo cual cuales cualquier cuando cuanta cuantas cuanto cuantos cuatro cuenta cuál cuáles cuándo cuánta cuántas cuánto cuántos cómo
d da dado dan dar de debajo debe deben debido decir dejó del delante demasiado demás dentro deprisa desde despacio despues después detras detrás dia dias dice dicen dicho dieron diferente diferentes dijeron dijo dio donde dos durante día días dónde
e ejemplo el ella ellas ello ellos embargo empleais emplean emplear empleas empleo en encima encuentra enfrente enseguida entonces entre era erais eramos eran eras eres es esa esas ese eso esos esta estaba estabais estaban estabas estad estada estadas estado estados estais estamos estan estando estar estaremos estará estarán estarás estaré estaréis estaría estaríais estaríamos estarían estarías estas este estemos esto estos estoy estuve estuviera estuvierais estuvieran estuvieras estuvieron estuviese estuvieseis estuviesen estuvieses estuvimos estuviste estuvisteis estuviéramos estuviésemos estuvo está estábamos estáis están estás esté estéis estén estés ex excepto existe existen explicó expresó
f fin final fue fuera fuerais fueran fueras fueron fuese fueseis fuesen fueses fui fuimos fuiste fuisteis fuéramos fuésemos
g general gran grandes gueno
h ha haber habia habida habidas habido habidos habiendo habla hablan habremos habrá habrán habrás habré habréis habría habríais habríamos habrían habrías habéis había habíais habíamos habían habías hace haceis hacemos hacen hacer hacerlo haces hacia haciendo hago han has hasta hay haya hayamos hayan hayas hayáis he hecho hemos hicieron hizo horas hoy hube hubiera hubierais hubieran hubieras hubieron hubiese hubieseis hubiesen hubieses hubimos hubiste hubisteis hubiéramos hubiésemos hubo
i igual incluso indicó informo informó intenta intentais intentamos intentan intentar intentas intento ir
j junto
k
l la lado largo las le lejos les llegó lleva llevar lo los luego lugar
m mal manera manifestó mas mayor me mediante medio mejor mencionó menos menudo mi mia mias mientras mio mios mis misma mismas mismo mismos modo momento mucha muchas mucho muchos muy más mí mía mías mío míos
n nada nadie ni ninguna ningunas ninguno ningunos ningún no nos nosotras nosotros nuestra nuestras nuestro nuestros nueva nuevas nuevo nuevos nunca
o ocho os otra otras otro otros
p pais para parece parte partir pasada pasado paìs peor pero pesar poca pocas poco pocos podeis podemos poder podria podriais podriamos podrian podrias podrá podrán podría podrían poner por por qué porque posible primer primera primero primeros principalmente pronto propia propias propio propios proximo próximo próximos pudo pueda puede pueden puedo pues
q qeu que quedó queremos quien quienes quiere quiza quizas quizá quizás quién quiénes qué
r raras realizado realizar realizó repente respecto
s sabe sabeis sabemos saben saber sabes sal salvo se sea seamos sean seas segun segunda segundo según seis ser sera seremos será serán serás seré seréis sería seríais seríamos serían serías seáis señaló si sido siempre siendo siete sigue siguiente sin sino sobre sois sola solamente solas solo solos somos son soy soyos su supuesto sus suya suyas suyo suyos sé sí sólo
t tal tambien también tampoco tan tanto tarde te temprano tendremos tendrá tendrán tendrás tendré tendréis tendría tendríais tendríamos tendrían tendrías tened teneis tenemos tener tenga tengamos tengan tengas tengo tengáis tenida tenidas tenido tenidos teniendo tenéis tenía teníais teníamos tenían tenías tercera ti tiempo tiene tienen tienes toda todas todavia todavía todo todos total trabaja trabajais trabajamos trabajan trabajar trabajas trabajo tras trata través tres tu tus tuve tuviera tuvierais tuvieran tuvieras tuvieron tuviese tuvieseis tuviesen tuvieses tuvimos tuviste tuvisteis tuviéramos tuviésemos tuvo tuya tuyas tuyo tuyos tú
u ultimo un una unas uno unos usa usais usamos usan usar usas uso usted ustedes
v va vais valor vamos van varias varios vaya veces ver verdad verdadera verdadero vez vosotras vosotros voy vuestra vuestras vuestro vuestros
w
x
y ya yo
z
él éramos ésa ésas ése ésos ésta éstas éste éstos última últimas último últimos
""".split()
)
STOP_WORDS.update(SPANISH_STOP_WORDS)

# English contraction suffixes ("don't" -> "n't", "I'd" -> "'d", ...) are
# also treated as stop words, mirroring spaCy's English stop-word handling.
contractions = ["n't", "'d", "'ll", "'m", "'re", "'s", "'ve"]
STOP_WORDS.update(contractions)

# Also register the variants written with typographic (curly) apostrophes,
# U+2018 / U+2019, so tokens coming from "don’t"-style input match as well.
# (The original code had lost these two unicode characters and contained two
# empty strings, which only produced junk entries like "nt" and "d".)
for apostrophe in ["\u2018", "\u2019"]:
    for stopword in contractions:
        STOP_WORDS.add(stopword.replace("'", apostrophe))

851
files/tasks.py Normal file
View File

@@ -0,0 +1,851 @@
import re
import os
import json
import subprocess
from datetime import datetime, timedelta
import tempfile
import shutil
from django.core.cache import cache
from django.conf import settings
from django.core.files import File
from django.db.models import Q
from celery import Task
from celery.decorators import task
from celery.utils.log import get_task_logger
from celery.exceptions import SoftTimeLimitExceeded
from celery.task.control import revoke
from celery.signals import task_revoked
from .backends import FFmpegBackend
from .exceptions import VideoEncodingError
from .helpers import (
calculate_seconds,
rm_file,
create_temp_file,
get_file_name,
get_file_type,
media_file_info,
run_command,
produce_ffmpeg_commands,
produce_friendly_token,
)
from actions.models import MediaAction, USER_MEDIA_ACTIONS
from users.models import User
from .models import Encoding, EncodeProfile, Media, Category, Rating, Tag
from .methods import list_tasks, pre_save_action, notify_users
logger = get_task_logger(__name__)

# Action identifiers accepted by save_user_action; USER_MEDIA_ACTIONS is a
# sequence of (action, name) pairs (Django choices style) — only the action
# keys are kept here.
VALID_USER_ACTIONS = [action for action, name in USER_MEDIA_ACTIONS]

# ffmpeg error substrings that indicate a permanent, non-retryable failure;
# encode_media will NOT re-queue a task whose output contains one of these.
ERRORS_LIST = [
    "Output file is empty, nothing was encoded",
    "Invalid data found when processing input",
    "Unable to find a suitable output format for",
]
@task(name="chunkize_media", bind=True, queue="short_tasks", soft_time_limit=60 * 30)
def chunkize_media(self, friendly_token, profiles, force=True):
    """Break media in chunks and start encoding tasks.

    Splits the original file into fixed-duration .mkv segments with ffmpeg's
    segment muxer, then queues one encode_media task per (chunk, profile)
    pair. Falls back to queueing whole-file encodes if segmentation fails.

    :param friendly_token: Media identifier.
    :param profiles: list of EncodeProfile ids to encode to.
    :param force: passed through to encode_media (re-encode if True).
    :return: True if chunk tasks were queued, False on fallback.
    """
    profiles = [EncodeProfile.objects.get(id=profile) for profile in profiles]
    media = Media.objects.get(friendly_token=friendly_token)
    cwd = os.path.dirname(os.path.realpath(media.media_file.path))
    file_name = media.media_file.path.split("/")[-1]
    # random prefix so concurrent chunkize runs don't collide on chunk names
    random_prefix = produce_friendly_token()
    file_format = "{0}_{1}".format(random_prefix, file_name)
    # "%02d" is expanded by ffmpeg's segment muxer into the chunk number
    chunks_file_name = "%02d_{0}".format(file_format)
    chunks_file_name += ".mkv"
    # stream-copy (no re-encode) split into VIDEO_CHUNKS_DURATION-second parts
    cmd = [
        settings.FFMPEG_COMMAND,
        "-y",
        "-i",
        media.media_file.path,
        "-c",
        "copy",
        "-f",
        "segment",
        "-segment_time",
        str(settings.VIDEO_CHUNKS_DURATION),
        chunks_file_name,
    ]
    chunks = []
    ret = run_command(cmd, cwd=cwd)
    # NOTE(review): checks for the "out" key but reads "error" — presumably
    # run_command returns both and ffmpeg reports the "Opening ... for
    # writing" lines on stderr; confirm against helpers.run_command.
    if "out" in ret.keys():
        for line in ret.get("error").split("\n"):
            ch = re.findall(r"Opening \'([\W\w]+)\' for writing", line)
            if ch:
                chunks.append(ch[0])
    if not chunks:
        # command completely failed to segment file.putting to normal encode
        logger.info(
            "Failed to break file {0} in chunks."
            " Putting to normal encode queue".format(friendly_token)
        )
        for profile in profiles:
            # skip profiles above the source resolution, unless the profile
            # is one of the always-encoded minimum resolutions
            if media.video_height and media.video_height < profile.resolution:
                if profile.resolution not in settings.MINIMUM_RESOLUTIONS_TO_ENCODE:
                    continue
            encoding = Encoding(media=media, profile=profile)
            encoding.save()
            enc_url = settings.SSL_FRONTEND_HOST + encoding.get_absolute_url()
            encode_media.delay(
                friendly_token, profile.id, encoding.id, enc_url, force=force
            )
        return False

    chunks = [os.path.join(cwd, ch) for ch in chunks]
    to_profiles = []
    chunks_dict = {}
    # calculate once md5sums
    for chunk in chunks:
        cmd = ["md5sum", chunk]
        stdout = run_command(cmd).get("out")
        md5sum = stdout.strip().split()[0]
        chunks_dict[chunk] = md5sum

    for profile in profiles:
        if media.video_height and media.video_height < profile.resolution:
            if profile.resolution not in settings.MINIMUM_RESOLUTIONS_TO_ENCODE:
                continue
        to_profiles.append(profile)
        for chunk in chunks:
            encoding = Encoding(
                media=media,
                profile=profile,
                chunk_file_path=chunk,
                chunk=True,
                chunks_info=json.dumps(chunks_dict),
                md5sum=chunks_dict[chunk],
            )
            encoding.save()
            enc_url = settings.SSL_FRONTEND_HOST + encoding.get_absolute_url()
            # minimum resolutions get top celery priority so a playable
            # version becomes available as early as possible
            if profile.resolution in settings.MINIMUM_RESOLUTIONS_TO_ENCODE:
                priority = 0
            else:
                priority = 9
            encode_media.apply_async(
                args=[friendly_token, profile.id, encoding.id, enc_url],
                kwargs={"force": force, "chunk": True, "chunk_file_path": chunk},
                priority=priority,
            )
    logger.info(
        "got {0} chunks and will encode to {1} profiles".format(
            len(chunks), to_profiles
        )
    )
    return True
class EncodingTask(Task):
    """Celery base class for encode_media: cleans up when a task fails."""

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """Mark the bound Encoding as failed and stop its ffmpeg process.

        Runs post-failure steps; also reached when a task is revoked.
        Best-effort only: any cleanup error is swallowed.
        """
        try:
            encoding = getattr(self, "encoding", None)
            if encoding is not None:
                encoding.status = "fail"
                encoding.save(update_fields=["status"])
                kill_ffmpeg_process(encoding.temp_file)
                if hasattr(encoding, "media"):
                    encoding.media.post_encode_actions()
        except BaseException:
            # never raise from a failure handler
            pass
        return False
@task(
    name="encode_media",
    base=EncodingTask,
    bind=True,
    queue="long_tasks",
    soft_time_limit=settings.CELERY_SOFT_TIME_LIMIT,
)
def encode_media(
    self,
    friendly_token,
    profile_id,
    encoding_id,
    encoding_url,
    force=True,
    chunk=False,
    chunk_file_path="",
):
    """Encode a media to given profile, using ffmpeg, storing progress.

    Handles three cases: gif extraction, encoding of a single chunk
    (chunk=True, input taken from chunk_file_path), and whole-file encoding.
    Progress is saved periodically on the Encoding object; permanent ffmpeg
    errors (ERRORS_LIST) are not retried, anything else is retried once.

    :return: True on success, False on failure (may also re-raise via retry).
    """
    logger.info(
        "Encode Media started, friendly token {0}, profile id {1}, force {2}".format(
            friendly_token, profile_id, force
        )
    )
    if self.request.id:
        task_id = self.request.id
    else:
        task_id = None
    try:
        media = Media.objects.get(friendly_token=friendly_token)
        profile = EncodeProfile.objects.get(id=profile_id)
    except BaseException:
        # media or profile vanished; the Encoding row is useless without them
        Encoding.objects.filter(id=encoding_id).delete()
        return False
    # break logic with chunk True/False
    if chunk:
        # TODO: in case a video is chunkized and this enters here many times
        # it will always run since chunk_file_path is always different
        # thus find a better way for this check
        # NOTE(review): "force == False" — should be "force is False" for
        # consistency with the non-chunk branch below.
        if (
            Encoding.objects.filter(
                media=media, profile=profile, chunk_file_path=chunk_file_path
            ).count()
            > 1
            and force == False
        ):
            # duplicate chunk encoding already exists and we're not forcing
            Encoding.objects.filter(id=encoding_id).delete()
            return False
        else:
            try:
                encoding = Encoding.objects.get(id=encoding_id)
                encoding.status = "running"
                # remove any other Encoding rows for the same chunk/profile
                Encoding.objects.filter(
                    media=media,
                    profile=profile,
                    chunk=True,
                    chunk_file_path=chunk_file_path,
                ).exclude(id=encoding_id).delete()
            except BaseException:
                # row was deleted meanwhile; recreate it
                encoding = Encoding(
                    media=media,
                    profile=profile,
                    status="running",
                    chunk=True,
                    chunk_file_path=chunk_file_path,
                )
    else:
        if (
            Encoding.objects.filter(media=media, profile=profile).count() > 1
            and force is False
        ):
            Encoding.objects.filter(id=encoding_id).delete()
            return False
        else:
            try:
                encoding = Encoding.objects.get(id=encoding_id)
                encoding.status = "running"
                Encoding.objects.filter(media=media, profile=profile).exclude(
                    id=encoding_id
                ).delete()
            except BaseException:
                encoding = Encoding(media=media, profile=profile, status="running")

    if task_id:
        encoding.task_id = task_id
    encoding.worker = "localhost"
    encoding.retries = self.request.retries
    encoding.save()

    if profile.extension == "gif":
        tf = create_temp_file(suffix=".gif")
        # -ss 5 start from 5 second. -t 25 until 25 sec
        command = [
            settings.FFMPEG_COMMAND,
            "-y",
            "-ss",
            "3",
            "-i",
            media.media_file.path,
            "-hide_banner",
            "-vf",
            "scale=344:-1:flags=lanczos,fps=1",
            "-t",
            "25",
            "-f",
            "gif",
            tf,
        ]
        ret = run_command(command)
        if os.path.exists(tf) and get_file_type(tf) == "image":
            with open(tf, "rb") as f:
                myfile = File(f)
                encoding.status = "success"
                # NOTE(review): the temp file path is used as the stored name
                encoding.media_file.save(content=myfile, name=tf)
            rm_file(tf)
            return True
        else:
            return False

    # chunk encodes read from the chunk file, not the original upload
    if chunk:
        original_media_path = chunk_file_path
    else:
        original_media_path = media.media_file.path

    # duration is required for the progress-percentage calculation below
    if not media.duration:
        encoding.status = "fail"
        encoding.save(update_fields=["status"])
        return False

    with tempfile.TemporaryDirectory(dir=settings.TEMP_DIRECTORY) as temp_dir:
        tf = create_temp_file(suffix=".{0}".format(profile.extension), dir=temp_dir)
        # second temp file used for two-pass encoding statistics
        tfpass = create_temp_file(suffix=".{0}".format(profile.extension), dir=temp_dir)
        ffmpeg_commands = produce_ffmpeg_commands(
            original_media_path,
            media.media_info,
            resolution=profile.resolution,
            codec=profile.codec,
            output_filename=tf,
            pass_file=tfpass,
            chunk=chunk,
        )
        if not ffmpeg_commands:
            encoding.status = "fail"
            encoding.save(update_fields=["status"])
            return False

        encoding.temp_file = tf
        encoding.commands = str(ffmpeg_commands)
        encoding.save(update_fields=["temp_file", "commands", "task_id"])

        # binding these, so they are available on on_failure
        self.encoding = encoding
        self.media = media

        # can be one-pass or two-pass
        for ffmpeg_command in ffmpeg_commands:
            ffmpeg_command = [str(s) for s in ffmpeg_command]
            encoding_backend = FFmpegBackend()
            try:
                # encode() yields ffmpeg output lines as a generator
                encoding_command = encoding_backend.encode(ffmpeg_command)
                duration, n_times = 0, 0
                output = ""
                while encoding_command:
                    try:
                        # TODO: understand an eternal loop
                        # eg h265 with mv4 file issue, and stop with error
                        output = next(encoding_command)
                        duration = calculate_seconds(output)
                        if duration:
                            percent = duration * 100 / media.duration
                            # throttle DB writes: save roughly every 60 lines
                            if n_times % 60 == 0:
                                encoding.progress = percent
                                try:
                                    encoding.save(
                                        update_fields=["progress", "update_date"]
                                    )
                                    logger.info("Saved {0}".format(round(percent, 2)))
                                except BaseException:
                                    pass
                            n_times += 1
                    except StopIteration:
                        break
                    except VideoEncodingError:
                        # ffmpeg error, or ffmpeg was killed
                        raise
            except Exception as e:
                try:
                    # output is empty, fail message is on the exception
                    output = e.message
                except AttributeError:
                    output = ""
                if isinstance(e, SoftTimeLimitExceeded):
                    # soft time limit hit: ffmpeg keeps running in its own
                    # subprocess, so it must be killed explicitly
                    kill_ffmpeg_process(encoding.temp_file)
                encoding.logs = output
                encoding.status = "fail"
                encoding.save(update_fields=["status", "logs"])
                raise_exception = True
                # if this is an ffmpeg's valid error
                # no need for the task to be re-run
                # otherwise rerun task...
                for error_msg in ERRORS_LIST:
                    if error_msg.lower() in output.lower():
                        raise_exception = False
                if raise_exception:
                    raise self.retry(exc=e, countdown=5, max_retries=1)

        # all passes finished: validate the output file and store the result
        encoding.logs = output
        encoding.progress = 100
        success = False
        encoding.status = "fail"
        if os.path.exists(tf) and os.path.getsize(tf) != 0:
            ret = media_file_info(tf)
            if ret.get("is_video") or ret.get("is_audio"):
                encoding.status = "success"
                success = True
                with open(tf, "rb") as f:
                    myfile = File(f)
                    output_name = "{0}.{1}".format(
                        get_file_name(original_media_path), profile.extension
                    )
                    encoding.media_file.save(content=myfile, name=output_name)
                encoding.total_run_time = (
                    encoding.update_date - encoding.add_date
                ).seconds

        try:
            encoding.save(
                update_fields=["status", "logs", "progress", "total_run_time"]
            )
        # this will raise a django.db.utils.DatabaseError error when task is revoked,
        # since we delete the encoding at that stage
        except BaseException:
            pass

    return success
@task(name="produce_sprite_from_video", queue="long_tasks")
def produce_sprite_from_video(friendly_token):
    """Produces a sprites file for a video, uses ffmpeg.

    Extracts one frame every 10 seconds (160x90), stitches them vertically
    with ImageMagick's convert, and stores the result on media.sprites.
    Best-effort: any failure is swallowed and True is still returned.
    """
    try:
        media = Media.objects.get(friendly_token=friendly_token)
    except BaseException:
        logger.info("failed to get media with friendly_token %s" % friendly_token)
        return False

    with tempfile.TemporaryDirectory(dir=settings.TEMP_DIRECTORY) as tmpdirname:
        try:
            # %03d is expanded by ffmpeg into a per-frame counter
            tmpdir_image_files = tmpdirname + "/img%03d.jpg"
            output_name = tmpdirname + "/sprites.jpg"
            # NOTE(review): shell=True with media.media_file.path interpolated
            # unquoted — a path containing shell metacharacters would break or
            # be interpreted by the shell. Paths are produced by Django's file
            # storage here, but consider shlex.quote for defense in depth.
            cmd = "{0} -i {1} -f image2 -vf 'fps=1/10, scale=160:90' {2}&&files=$(ls {3}/img*.jpg | sort -t '-' -n -k 2 | tr '\n' ' ')&&convert $files -append {4}".format(
                settings.FFMPEG_COMMAND,
                media.media_file.path,
                tmpdir_image_files,
                tmpdirname,
                output_name,
            )
            ret = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
            if os.path.exists(output_name) and get_file_type(output_name) == "image":
                with open(output_name, "rb") as f:
                    myfile = File(f)
                    media.sprites.save(
                        content=myfile,
                        name=get_file_name(media.media_file.path) + "sprites.jpg",
                    )
        except BaseException:
            # best effort: a missing sprites file only degrades the player UI
            pass
    return True
@task(name="create_hls", queue="long_tasks")
def create_hls(friendly_token):
    """Creates HLS file for media, uses Bento4 mp4hls command.

    Builds an HLS rendition set from all successful, non-chunk h264 mp4
    encodings of the media, writing to HLS_DIR/<media uid>. When output
    already exists, builds into a temp dir and overlays it, so playback
    is not broken while the new set is produced.
    """
    if not hasattr(settings, "MP4HLS_COMMAND"):
        logger.info("Bento4 mp4hls command is missing from configuration")
        return False
    if not os.path.exists(settings.MP4HLS_COMMAND):
        logger.info("Bento4 mp4hls command is missing")
        return False
    try:
        media = Media.objects.get(friendly_token=friendly_token)
    except BaseException:
        logger.info("failed to get media with friendly_token %s" % friendly_token)
        return False

    p = media.uid.hex
    output_dir = os.path.join(settings.HLS_DIR, p)
    # only finished whole-file h264 mp4 outputs are suitable HLS sources
    encodings = media.encodings.filter(
        profile__extension="mp4", status="success", chunk=False, profile__codec="h264"
    )
    if encodings:
        existing_output_dir = None
        if os.path.exists(output_dir):
            # build into a uniquely-named dir first, then swap in below
            existing_output_dir = output_dir
            output_dir = os.path.join(settings.HLS_DIR, p + produce_friendly_token())
        files = " ".join([f.media_file.path for f in encodings if f.media_file])
        cmd = "{0} --segment-duration=4 --output-dir={1} {2}".format(
            settings.MP4HLS_COMMAND, output_dir, files
        )
        ret = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
        if existing_output_dir:
            # override content with -T !
            cmd = "cp -rT {0} {1}".format(output_dir, existing_output_dir)
            ret = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
            shutil.rmtree(output_dir)
            output_dir = existing_output_dir
        pp = os.path.join(output_dir, "master.m3u8")
        if os.path.exists(pp):
            if media.hls_file != pp:
                media.hls_file = pp
                media.save(update_fields=["hls_file"])
    return True
@task(name="check_running_states", queue="short_tasks")
def check_running_states():
    # Experimental - unused
    """Check stale running encodings and delete/reencode media.

    An encoding stuck in "running" with no progress update for more than
    RUNNING_STATE_STALE seconds gets its task revoked, the Encoding row
    deleted, and the media re-queued for that profile.
    """
    encodings = Encoding.objects.filter(status="running")

    logger.info("got {0} encodings that are in state running".format(encodings.count()))
    changed = 0
    for encoding in encodings:
        now = datetime.now(encoding.update_date.tzinfo)
        # NOTE(review): timedelta.seconds wraps at 24h (it ignores .days);
        # use .total_seconds() if staleness can exceed a day — TODO confirm.
        if (now - encoding.update_date).seconds > settings.RUNNING_STATE_STALE:
            media = encoding.media
            profile = encoding.profile
            task_id = encoding.task_id
            # terminate task
            if task_id:
                revoke(task_id, terminate=True)
            encoding.delete()
            media.encode(profiles=[profile])
            # TODO: allign with new code + chunksize...
            changed += 1
    if changed:
        logger.info("changed from running to pending on {0} items".format(changed))
    return True
@task(name="check_media_states", queue="short_tasks")
def check_media_states():
    """Recalculate encoding_status for media not yet in the success state."""
    # Experimental - unused
    not_success = (
        Q(encoding_status="running")
        | Q(encoding_status="fail")
        | Q(encoding_status="pending")
    )
    queryset = Media.objects.filter(not_success)
    logger.info("got {0} media that are not in state success".format(queryset.count()))

    updated = 0
    for item in queryset:
        item.set_encoding_status()
        item.save(update_fields=["encoding_status"])
        updated += 1
    if updated:
        logger.info("changed encoding status to {0} media items".format(updated))
    return True
@task(name="check_pending_states", queue="short_tasks")
def check_pending_states():
    # Experimental - unused
    # check encoding profiles that are on state pending and not on a queue
    """Re-queue pending encodings that are not present on any celery queue."""
    encodings = Encoding.objects.filter(status="pending")
    if not encodings:
        return True
    changed = 0
    # snapshot of active/reserved/scheduled celery tasks
    tasks = list_tasks()
    task_ids = tasks["task_ids"]
    media_profile_pairs = tasks["media_profile_pairs"]
    for encoding in encodings:
        if encoding.task_id and encoding.task_id in task_ids:
            # encoding is in one of the active/reserved/scheduled tasks list
            continue
        elif (
            encoding.media.friendly_token,
            encoding.profile.id,
        ) in media_profile_pairs:
            # encoding is in one of the reserved/scheduled tasks list.
            # has no task_id but will be run, so no need to re-enter the queue
            continue
        else:
            # orphaned: not on any queue, so delete and re-queue it
            media = encoding.media
            profile = encoding.profile
            encoding.delete()
            media.encode(profiles=[profile], force=False)
            changed += 1
    if changed:
        logger.info(
            "set to the encode queue {0} encodings that were on pending state".format(
                changed
            )
        )
    return True
@task(name="check_missing_profiles", queue="short_tasks")
def check_missing_profiles():
    """Queue encodings for any video media that lacks some encode profile."""
    # Experimental - unused
    all_profiles = list(EncodeProfile.objects.all())
    queued = 0

    for video in Media.objects.filter(media_type="video"):
        present = [enc.profile for enc in video.encodings.all()]
        missing = [prof for prof in all_profiles if prof not in present]
        if not missing:
            continue
        # since we call with force=False
        # encode_media won't delete existing profiles
        # if they appear on the meanwhile (eg on a big queue)
        video.encode(profiles=missing, force=False)
        queued += 1
    if queued:
        logger.info("set to the encode queue {0} profiles".format(queued))
    return True
@task(name="clear_sessions", queue="short_tasks")
def clear_sessions():
    """Delete expired sessions through the configured session engine."""
    try:
        from importlib import import_module

        from django.conf import settings

        session_engine = import_module(settings.SESSION_ENGINE)
        session_engine.SessionStore.clear_expired()
    except BaseException:
        # best effort: report failure instead of raising inside the worker
        return False
    return True
@task(name="save_user_action", queue="short_tasks")
def save_user_action(
    user_or_session, friendly_token=None, action="watch", extra_info=None
):
    """Short task that saves a user action.

    :param user_or_session: dict with optional "user_id", "user_session"
        and "remote_ip_addr" keys identifying who performed the action.
    :param friendly_token: the Media the action applies to.
    :param action: one of VALID_USER_ACTIONS (watch/like/dislike/report/rate/...).
    :param extra_info: action-specific payload; for "rate" a dict with
        "score" and "category_id".
    :return: True if the action was recorded, False otherwise.
    """
    if action not in VALID_USER_ACTIONS:
        return False
    try:
        media = Media.objects.get(friendly_token=friendly_token)
    except BaseException:
        return False
    user = user_or_session.get("user_id")
    session_key = user_or_session.get("user_session")
    remote_ip = user_or_session.get("remote_ip_addr")

    if user:
        try:
            user = User.objects.get(id=user)
        except BaseException:
            return False
    # must be attributable to either a user or an anonymous session
    if not (user or session_key):
        return False

    # rate-limit / dedupe checks for abuse-prone actions
    if action in ["like", "dislike", "report"]:
        if not pre_save_action(
            media=media,
            user=user,
            session_key=session_key,
            action=action,
            remote_ip=remote_ip,
        ):
            return False

    if action == "watch":
        # keep only the latest watch action per user/session and media
        if user:
            MediaAction.objects.filter(user=user, media=media, action="watch").delete()
        else:
            MediaAction.objects.filter(
                session_key=session_key, media=media, action="watch"
            ).delete()

    if action == "rate":
        try:
            score = extra_info.get("score")
            rating_category = extra_info.get("category_id")
        except BaseException:
            # TODO: better error handling?
            return False
        try:
            # update the existing rating for this category, or create one
            rating = Rating.objects.filter(
                user=user, media=media, rating_category_id=rating_category
            ).first()
            if rating:
                rating.score = score
                rating.save(update_fields=["score"])
            else:
                rating = Rating.objects.create(
                    user=user,
                    media=media,
                    rating_category_id=rating_category,
                    score=score,
                )
        # NOTE(review): "exc" is unused — either log it or drop the binding
        except Exception as exc:
            # TODO: more specific handling, for errors in score, or
            # rating_category?
            return False

    ma = MediaAction(
        user=user,
        session_key=session_key,
        media=media,
        action=action,
        extra_info=extra_info,
        remote_ip=remote_ip,
    )
    ma.save()

    # update the denormalized counters on the Media row
    if action == "watch":
        media.views += 1
        media.save(update_fields=["views"])
    elif action == "report":
        media.reported_times += 1
        # auto-hide heavily reported media and notify the admins
        if media.reported_times >= settings.REPORTED_TIMES_THRESHOLD:
            media.state = "private"
        media.save(update_fields=["reported_times", "state"])
        notify_users(
            friendly_token=media.friendly_token,
            action="media_reported",
            extra=extra_info,
        )
    elif action == "like":
        media.likes += 1
        media.save(update_fields=["likes"])
    elif action == "dislike":
        media.dislikes += 1
        media.save(update_fields=["dislikes"])
    return True
@task(name="get_list_of_popular_media", queue="long_tasks")
def get_list_of_popular_media():
    """Experimental task for preparing media listing
    for index page / recommended section.

    Calculates and caches up to 50 popular media ids, based on two rules:
    X = the top 25 videos with the most watches during the last week
    Y = the top 25 videos with the most likes over the last 6 months
    The union (deduplicated) is stored under "popular_media_ids" for 12h.
    """
    valid_media_x = {}
    valid_media_y = {}
    basic_query = Q(listable=True)
    media_x = Media.objects.filter(basic_query).values("friendly_token")

    # NOTE(review): datetime.now() is timezone-naive; if action_date is
    # timezone-aware (Django USE_TZ), these comparisons need timezone.now().
    period_x = datetime.now() - timedelta(days=7)
    period_y = datetime.now() - timedelta(days=30 * 6)

    for media in media_x:
        ft = media["friendly_token"]
        # watches in the last week
        num = MediaAction.objects.filter(
            action_date__gte=period_x, action="watch", media__friendly_token=ft
        ).count()
        if num:
            valid_media_x[ft] = num
        # likes in the last ~6 months
        num = MediaAction.objects.filter(
            action_date__gte=period_y, action="like", media__friendly_token=ft
        ).count()
        if num:
            valid_media_y[ft] = num

    # keep the 25 highest-scoring tokens of each rule
    x = sorted(valid_media_x.items(), key=lambda kv: kv[1], reverse=True)[:25]
    y = sorted(valid_media_y.items(), key=lambda kv: kv[1], reverse=True)[:25]

    media_ids = [a[0] for a in x]
    media_ids.extend([a[0] for a in y])
    media_ids = list(set(media_ids))

    cache.set("popular_media_ids", media_ids, 60 * 60 * 12)
    logger.info("saved popular media ids")
    return True
def _best_unused_media(used_tokens, **filters):
    """Return the most-viewed public, reviewed Media matching *filters*
    whose friendly_token is not in *used_tokens*, or None."""
    return (
        Media.objects.exclude(friendly_token__in=used_tokens)
        .filter(state="public", is_reviewed=True, **filters)
        .order_by("-views")
        .first()
    )


@task(name="update_listings_thumbnails", queue="long_tasks")
def update_listings_thumbnails():
    """Populate listings_thumbnail field for models.

    Each Category and Tag gets the thumbnail of its most-viewed public,
    reviewed media; a given media's thumbnail is used for at most one
    listing per model, so listings don't all show the same image.
    """
    # Categories
    used_media = []
    saved = 0
    # most-populated listings pick first, so they get the best thumbnails
    for category in Category.objects.filter().order_by("-media_count"):
        media = _best_unused_media(used_media, category=category)
        if media:
            category.listings_thumbnail = media.thumbnail_url
            category.save(update_fields=["listings_thumbnail"])
            used_media.append(media.friendly_token)
            saved += 1
    logger.info("updated {} categories".format(saved))

    # Tags
    used_media = []
    saved = 0
    for tag in Tag.objects.filter().order_by("-media_count"):
        media = _best_unused_media(used_media, tags=tag)
        if media:
            tag.listings_thumbnail = media.thumbnail_url
            tag.save(update_fields=["listings_thumbnail"])
            used_media.append(media.friendly_token)
            saved += 1
    logger.info("updated {} tags".format(saved))
    return True
@task_revoked.connect
def task_sent_handler(sender=None, headers=None, body=None, **kwargs):
    """Signal handler run when a celery task is revoked."""
    # For encode_media tasks that are revoked,
    # ffmpeg command won't be stopped, since
    # it got started by a subprocess.
    # Need to stop that process
    # Also, removing the Encoding object,
    # since the task that would prepare it was killed
    # Maybe add a killed state for Encoding objects
    try:
        uid = kwargs["request"].task_id
        if uid:
            encoding = Encoding.objects.get(task_id=uid)
            # the in-memory instance keeps its fields after delete(),
            # so temp_file is still readable below
            encoding.delete()
            logger.info("deleted the Encoding object")
            if encoding.temp_file:
                kill_ffmpeg_process(encoding.temp_file)
    except BaseException:
        # revocations for non-encoding tasks (no Encoding row) end up here
        pass
    return True
def kill_ffmpeg_process(filepath):
    """Forcefully kill any running ffmpeg process whose command line
    mentions *filepath* (the encoding's temp output file).

    :param filepath: path that identifies the ffmpeg invocation to stop.
    :return: the last subprocess.CompletedProcess that was executed (the
        `ps` lookup when nothing matched, otherwise the last `kill`).
    """
    # this is not ideal, ffmpeg pid could be linked to the Encoding object
    cmd = "ps aux|grep 'ffmpeg'|grep %s|grep -v grep |awk '{print $2}'" % filepath
    result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
    # ps can report several matching pids; the previous single
    # "kill -9 <multiline string>" call only handled the first one correctly,
    # because the embedded newline split the shell command.
    for pid in result.stdout.decode("utf-8").split():
        cmd = "kill -9 %s" % pid
        result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
    return result
@task(name="remove_media_file", base=Task, queue="long_tasks")
def remove_media_file(media_file=None):
    """Delete a single file from storage (helpers.rm_file is best-effort)."""
    rm_file(media_file)
    return True
# TODO LIST
# 1 chunks are deleted from original server when file is fully encoded.
# however need to enter this logic in cases of fail as well
# 2 script to delete chunks in fail status
# (and check for their encodings, and delete them as well, along with
# all chunks)
# 3 beat task, remove chunks

0
files/tests.py Normal file
View File

91
files/urls.py Normal file
View File

@@ -0,0 +1,91 @@
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url, include
from django.urls import path
from . import views
from . import management_views
from .feeds import RssMediaFeed
# URL routes for the files app: HTML pages first, then the JSON API, then
# management endpoints. NOTE(review): several patterns ("^about", "^edit",
# "^view", ...) are not anchored with $ and so match any path with that
# prefix — presumably intentional for the SPA frontend; confirm.
urlpatterns = [
    url(r"^$", views.index),
    url(r"^about", views.about, name="about"),
    url(r"^add_subtitle", views.add_subtitle, name="add_subtitle"),
    url(r"^categories$", views.categories, name="categories"),
    url(r"^contact$", views.contact, name="contact"),
    url(r"^edit", views.edit_media, name="edit_media"),
    url(r"^embed", views.embed_media, name="get_embed"),
    url(r"^featured$", views.featured_media),
    url(r"^fu/", include(("uploader.urls", "uploader"), namespace="uploader")),
    url(r"^history$", views.history, name="history"),
    url(r"^liked$", views.liked_media, name="liked_media"),
    url(r"^latest$", views.latest_media),
    url(r"^members", views.members, name="members"),
    # singular and plural playlist URLs resolve to the same view; the
    # shared name means reverse("get_playlist") yields the latter pattern
    url(
        r"^playlist/(?P<friendly_token>[\w]*)$",
        views.view_playlist,
        name="get_playlist",
    ),
    url(
        r"^playlists/(?P<friendly_token>[\w]*)$",
        views.view_playlist,
        name="get_playlist",
    ),
    url(r"^popular$", views.recommended_media),
    url(r"^recommended$", views.recommended_media),
    path("rss/", RssMediaFeed()),
    url(r"^search", views.search, name="search"),
    # both upload entry points share the "upload_media" name
    url(r"^scpublisher", views.upload_media, name="upload_media"),
    url(r"^tags", views.tags, name="tags"),
    url(r"^tos$", views.tos, name="terms_of_service"),
    url(r"^view", views.view_media, name="get_media"),
    url(r"^upload", views.upload_media, name="upload_media"),
    # API VIEWS
    url(r"^api/v1/media$", views.MediaList.as_view()),
    url(r"^api/v1/media/$", views.MediaList.as_view()),
    url(
        r"^api/v1/media/(?P<friendly_token>[\w]*)$",
        views.MediaDetail.as_view(),
        name="api_get_media",
    ),
    url(
        r"^api/v1/media/encoding/(?P<encoding_id>[\w]*)$",
        views.EncodingDetail.as_view(),
        name="api_get_encoding",
    ),
    url(r"^api/v1/search$", views.MediaSearch.as_view()),
    url(
        r"^api/v1/media/(?P<friendly_token>[\w]*)/actions$",
        views.MediaActions.as_view(),
    ),
    url(r"^api/v1/categories$", views.CategoryList.as_view()),
    url(r"^api/v1/tags$", views.TagList.as_view()),
    url(r"^api/v1/comments$", views.CommentList.as_view()),
    url(
        r"^api/v1/media/(?P<friendly_token>[\w]*)/comments$",
        views.CommentDetail.as_view(),
    ),
    url(
        r"^api/v1/media/(?P<friendly_token>[\w]*)/comments/(?P<uid>[\w-]*)$",
        views.CommentDetail.as_view(),
    ),
    url(r"^api/v1/playlists$", views.PlaylistList.as_view()),
    url(r"^api/v1/playlists/$", views.PlaylistList.as_view()),
    url(
        r"^api/v1/playlists/(?P<friendly_token>[\w]*)$",
        views.PlaylistDetail.as_view(),
        name="api_get_playlist",
    ),
    url(r"^api/v1/user/action/(?P<action>[\w]*)$", views.UserActions.as_view()),
    # ADMIN VIEWS
    url(r"^api/v1/encode_profiles/$", views.EncodeProfileList.as_view()),
    url(r"^api/v1/manage_media$", management_views.MediaList.as_view()),
    url(r"^api/v1/manage_comments$", management_views.CommentList.as_view()),
    url(r"^api/v1/manage_users$", management_views.UserList.as_view()),
    url(r"^api/v1/tasks$", views.TasksList.as_view()),
    url(r"^api/v1/tasks/$", views.TasksList.as_view()),
    # NOTE(review): [\w|\W] matches any character (including "|" literally)
    url(r"^api/v1/tasks/(?P<friendly_token>[\w|\W]*)$", views.TaskDetail.as_view()),
    url(r"^manage/comments$", views.manage_comments, name="manage_comments"),
    url(r"^manage/media$", views.manage_media, name="manage_media"),
    url(r"^manage/users$", views.manage_users, name="manage_users"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

1273
files/views.py Normal file

File diff suppressed because it is too large Load Diff