
Commit 3d58aa7

timeline_webview: Formatting & cleanup
- Rewrap lines, reindent, fix spacing
- Replace print() with log.info()
- Eliminate unused imports, variables
- No bare excepts
1 parent 90e7bc4 commit 3d58aa7
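
As a quick illustration of the conventions this commit applies, here is a minimal sketch (not code taken from timeline_webview.py; the parse_clip helper and logger setup are assumed for the example):

import json
import logging

log = logging.getLogger(__name__)


def parse_clip(clip_json):
    # Cleaned-up style: a named exception class instead of a bare "except:",
    # and log.info() instead of print().
    try:
        return json.loads(clip_json)
    except Exception:
        log.info("Failed to parse clip JSON: %s" % clip_json)
        return None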


1 file changed: +74 lines added, -95 lines removed


src/windows/views/timeline_webview.py (+74, -95)
@@ -28,12 +28,10 @@
 """

 import os
-import sys
 import time
 from copy import deepcopy
 from functools import partial
 from random import uniform
-from urllib.parse import urlparse
 from operator import itemgetter

 import openshot  # Python module for libopenshot (required video editing module installed separately)
@@ -181,7 +179,7 @@ def eval_js(self, code, retries=0):
             # Not ready, try again in a few milliseconds
             if retries > 1:
                 log.warning("TimelineWebView::eval_js() called before document ready event. Script queued: %s" % code)
-            QTimer.singleShot(100, partial(self.eval_js, code, retries+1))
+            QTimer.singleShot(100, partial(self.eval_js, code, retries + 1))
             return None
         else:
             # Execute JS code
@@ -228,7 +226,7 @@ def update_clip_data(self, clip_json, only_basic_props=True, ignore_reader=False
                 clip_data = json.loads(clip_json)
             else:
                 clip_data = clip_json
-        except:
+        except Exception:
             # Failed to parse json, do nothing
             return

@@ -250,7 +248,8 @@ def update_clip_data(self, clip_json, only_basic_props=True, ignore_reader=False
             existing_clip.data["start"] = clip_data["start"]
             existing_clip.data["end"] = clip_data["end"]

-        # Always remove the Reader attribute (since nothing updates it, and we are wrapping clips in FrameMappers anyway)
+        # Always remove the Reader attribute (since nothing updates it,
+        # and we are wrapping clips in FrameMappers anyway)
         if ignore_reader and "reader" in existing_clip.data:
             existing_clip.data.pop("reader")

@@ -576,7 +575,6 @@ def ShowClipMenu(self, clip_id=None):
         Fade_Menu.addMenu(Position_Menu)
         menu.addMenu(Fade_Menu)

-
         # Animate Menu
         Animate_Menu = QMenu(_("Animate"), self)
         Animate_None = Animate_Menu.addAction(_("No Animation"))
@@ -828,7 +826,7 @@ def ShowClipMenu(self, clip_id=None):
         return menu.popup(QCursor.pos())

     def Transform_Triggered(self, action, clip_ids):
-        print("Transform_Triggered")
+        log.info("Transform_Triggered")

         # Emit signal to transform this clip (for the 1st clip id)
         if clip_ids:
@@ -930,9 +928,9 @@ def Split_Audio_Triggered(self, action, clip_ids):
             all_tracks = get_app().project.get("layers")

             # Clear audio override
-            p = openshot.Point(1, -1.0, openshot.CONSTANT) # Override has_audio keyframe to False
+            p = openshot.Point(1, -1.0, openshot.CONSTANT)  # Override has_audio keyframe to False
             p_object = json.loads(p.Json())
-            clip.data["has_audio"] = { "Points" : [p_object]}
+            clip.data["has_audio"] = {"Points": [p_object]}

             # Remove the ID property from the clip (so it becomes a new one)
             clip.id = None
@@ -947,12 +945,12 @@ def Split_Audio_Triggered(self, action, clip_ids):
                 # Clear channel filter on new clip
                 p = openshot.Point(1, -1.0, openshot.CONSTANT)
                 p_object = json.loads(p.Json())
-                clip.data["channel_filter"] = { "Points" : [p_object]}
+                clip.data["channel_filter"] = {"Points": [p_object]}

                 # Filter out video on the new clip
-                p = openshot.Point(1, 0.0, openshot.CONSTANT) # Override has_video keyframe to False
+                p = openshot.Point(1, 0.0, openshot.CONSTANT)  # Override has_video keyframe to False
                 p_object = json.loads(p.Json())
-                clip.data["has_video"] = { "Points" : [p_object]}
+                clip.data["has_video"] = {"Points": [p_object]}
                 # Also set scale to None
                 # Workaround for https://github.com/OpenShot/openshot-qt/issues/2882
                 clip.data["scale"] = openshot.SCALE_NONE
@@ -969,7 +967,7 @@ def Split_Audio_Triggered(self, action, clip_ids):
                     continue

                 # Adjust the layer, so this new audio clip doesn't overlap the parent
-                clip.data['layer'] = next_track_number # Add to layer below clip
+                clip.data['layer'] = next_track_number  # Add to layer below clip

                 # Adjust the clip title
                 channel_label = _("(all channels)")
@@ -992,12 +990,12 @@ def Split_Audio_Triggered(self, action, clip_ids):
                     # Each clip is filtered to a different channel
                     p = openshot.Point(1, channel, openshot.CONSTANT)
                     p_object = json.loads(p.Json())
-                    clip.data["channel_filter"] = { "Points" : [p_object]}
+                    clip.data["channel_filter"] = {"Points": [p_object]}

                     # Filter out video on the new clip
-                    p = openshot.Point(1, 0.0, openshot.CONSTANT) # Override has_video keyframe to False
+                    p = openshot.Point(1, 0.0, openshot.CONSTANT)  # Override has_video keyframe to False
                     p_object = json.loads(p.Json())
-                    clip.data["has_video"] = { "Points" : [p_object]}
+                    clip.data["has_video"] = {"Points": [p_object]}
                     # Also set scale to None
                     # Workaround for https://github.com/OpenShot/openshot-qt/issues/2882
                     clip.data["scale"] = openshot.SCALE_NONE
@@ -1014,7 +1012,7 @@ def Split_Audio_Triggered(self, action, clip_ids):
                         continue

                     # Adjust the layer, so this new audio clip doesn't overlap the parent
-                    clip.data['layer'] = max(next_track_number, 0) # Add to layer below clip
+                    clip.data['layer'] = max(next_track_number, 0)  # Add to layer below clip

                     # Adjust the clip title
                     channel_label = _("(channel %s)") % (channel + 1)
@@ -1041,9 +1039,9 @@ def Split_Audio_Triggered(self, action, clip_ids):
                 continue

             # Filter out audio on the original clip
-            p = openshot.Point(1, 0.0, openshot.CONSTANT) # Override has_audio keyframe to False
+            p = openshot.Point(1, 0.0, openshot.CONSTANT)  # Override has_audio keyframe to False
             p_object = json.loads(p.Json())
-            clip.data["has_audio"] = { "Points" : [p_object]}
+            clip.data["has_audio"] = {"Points": [p_object]}

             # Save filter on original clip
             self.update_clip_data(clip.data, only_basic_props=False, ignore_reader=True)
@@ -1082,36 +1080,35 @@ def Layout_Triggered(self, action, clip_ids):
             # Clear scale keyframes
             p = openshot.Point(1, 1.0, openshot.BEZIER)
             p_object = json.loads(p.Json())
-            clip.data["scale_x"] = { "Points" : [p_object]}
-            clip.data["scale_y"] = { "Points" : [p_object]}
+            clip.data["scale_x"] = {"Points": [p_object]}
+            clip.data["scale_y"] = {"Points": [p_object]}

             # Clear location keyframes
             p = openshot.Point(1, 0.0, openshot.BEZIER)
             p_object = json.loads(p.Json())
-            clip.data["location_x"] = { "Points" : [p_object]}
-            clip.data["location_y"] = { "Points" : [p_object]}
+            clip.data["location_x"] = {"Points": [p_object]}
+            clip.data["location_y"] = {"Points": [p_object]}

             if action == MENU_LAYOUT_CENTER or \
-                action == MENU_LAYOUT_TOP_LEFT or \
-                action == MENU_LAYOUT_TOP_RIGHT or \
-                action == MENU_LAYOUT_BOTTOM_LEFT or \
-                action == MENU_LAYOUT_BOTTOM_RIGHT:
+                    action == MENU_LAYOUT_TOP_LEFT or \
+                    action == MENU_LAYOUT_TOP_RIGHT or \
+                    action == MENU_LAYOUT_BOTTOM_LEFT or \
+                    action == MENU_LAYOUT_BOTTOM_RIGHT:
                 # Reset scale mode
                 clip.data["scale"] = openshot.SCALE_FIT
                 clip.data["gravity"] = new_gravity

                 # Add scale keyframes
                 p = openshot.Point(1, 0.5, openshot.BEZIER)
                 p_object = json.loads(p.Json())
-                clip.data["scale_x"] = { "Points" : [p_object]}
-                clip.data["scale_y"] = { "Points" : [p_object]}
+                clip.data["scale_x"] = {"Points": [p_object]}
+                clip.data["scale_y"] = {"Points": [p_object]}

                 # Add location keyframes
                 p = openshot.Point(1, 0.0, openshot.BEZIER)
                 p_object = json.loads(p.Json())
-                clip.data["location_x"] = { "Points" : [p_object]}
-                clip.data["location_y"] = { "Points" : [p_object]}
-
+                clip.data["location_x"] = {"Points": [p_object]}
+                clip.data["location_y"] = {"Points": [p_object]}

             if action == MENU_LAYOUT_ALL_WITH_ASPECT:
                 # Update all intersecting clips
@@ -1452,7 +1449,7 @@ def Paste_Triggered(self, action, position, layer_id, clip_ids, tran_ids):
                 continue

             # Apply clipboard to clip (there should only be a single key in this dict)
-            for k,v in self.copy_clipboard[list(self.copy_clipboard)[0]].items():
+            for k, v in self.copy_clipboard[list(self.copy_clipboard)[0]].items():
                 if k != 'id':
                     # Overwrite clips properties (which are in the clipboard)
                     clip.data[k] = v
@@ -1489,9 +1486,9 @@ def Nudge_Triggered(self, action, clip_ids, tran_ids):
         fps = get_app().project.get("fps")
         fps_float = float(fps["num"]) / float(fps["den"])
         nudgeDistance = float(action) / float(fps_float)
-        nudgeDistance /= 2.0 # 1/2 frame
+        nudgeDistance /= 2.0  # 1/2 frame
         if abs(nudgeDistance) < 0.01:
-            nudgeDistance = 0.01 * action # nudge is less than the minimum of +/- 0.01s
+            nudgeDistance = 0.01 * action  # nudge is less than the minimum of +/- 0.01s
         log.info("Nudging by %s sec" % nudgeDistance)

         # Loop through each selected clip (find furthest left and right edge)
@@ -1566,7 +1563,6 @@ def Nudge_Triggered(self, action, clip_ids, tran_ids):
             # Save changes
             self.update_transition_data(tran.data, only_basic_props=False)

-
     def Align_Triggered(self, action, clip_ids, tran_ids):
         """Callback for alignment context menus"""
         log.info(action)
@@ -1608,7 +1604,6 @@ def Align_Triggered(self, action, clip_ids, tran_ids):
             if position + (end_of_tran - start_of_tran) > right_edge or right_edge == -1.0:
                 right_edge = position + (end_of_tran - start_of_tran)

-
         # Loop through each selected clip (update position to align clips)
         for clip_id in clip_ids:
             # Get existing clip object
@@ -1985,10 +1980,6 @@ def Rotate_Triggered(self, action, clip_ids, position="Start of Clip"):
         log.info(action)
         prop_name = "rotation"

-        # Get FPS from project
-        fps = get_app().project.get("fps")
-        fps_float = float(fps["num"]) / float(fps["den"])
-
         # Loop through each selected clip
         for clip_id in clip_ids:

@@ -2036,9 +2027,9 @@ def Time_Triggered(self, action, clip_ids, speed="1X", playhead_position=0.0):

         # Loop through each selected clip
         for clip_id in clip_ids:
-
             # Get existing clip object
             clip = Clip.get(id=clip_id)
+
             if not clip:
                 # Invalid clip, skip to next item
                 continue
@@ -2061,16 +2052,15 @@ def Time_Triggered(self, action, clip_ids, speed="1X", playhead_position=0.0):
                 if "original_data" in clip.data.keys():
                     original_duration = clip.data["original_data"]["duration"]

-                print('ORIGINAL DURATION: %s' % original_duration)
-                print(clip.data)
+                log.info('ORIGINAL DURATION: %s' % original_duration)
+                log.info(clip.data)

                 # Extend end & duration (due to freeze)
                 clip.data["end"] = float(clip.data["end"]) + freeze_seconds
                 clip.data["duration"] = float(clip.data["duration"]) + freeze_seconds
                 clip.data["reader"]["video_length"] = float(clip.data["reader"]["video_length"]) + freeze_seconds

                 # Determine start frame from position
-                freeze_length_frames = round(freeze_seconds * fps_float) + 1
                 start_animation_seconds = float(clip.data["start"]) + (playhead_position - float(clip.data["position"]))
                 start_animation_frames = round(start_animation_seconds * fps_float) + 1
                 start_animation_frames_value = start_animation_frames
@@ -2178,7 +2168,7 @@ def Time_Triggered(self, action, clip_ids, speed="1X", playhead_position=0.0):
                 # Clear all keyframes
                 p = openshot.Point(start_animation, 0.0, openshot.LINEAR)
                 p_object = json.loads(p.Json())
-                clip.data[prop_name] = { "Points" : [p_object]}
+                clip.data['time'] = {"Points": [p_object]}

                 # Get the ending frame
                 end_of_clip = round(float(clip.data["end"]) * fps_float) + 1
@@ -2264,44 +2254,45 @@ def show_all_clips(self, clip, stretch=False):
         for row in range(0, number_of_rows):

             # Loop through clips on this row
-            column_string = " - - - "
             for col in range(0, max_clips_on_row):
-                if clip_index < number_of_clips:
-                    # Calculate X & Y
-                    X = float(col) * width
-                    Y = float(row) * height
-
-                    # Modify clip layout settings
-                    selected_clip = available_clips[clip_index]
-                    selected_clip.data["gravity"] = openshot.GRAVITY_TOP_LEFT
-
-                    if stretch:
-                        selected_clip.data["scale"] = openshot.SCALE_STRETCH
-                    else:
-                        selected_clip.data["scale"] = openshot.SCALE_FIT
-
-                    # Set scale keyframes
-                    w = openshot.Point(1, width, openshot.BEZIER)
-                    w_object = json.loads(w.Json())
-                    selected_clip.data["scale_x"] = { "Points" : [w_object]}
-                    h = openshot.Point(1, height, openshot.BEZIER)
-                    h_object = json.loads(h.Json())
-                    selected_clip.data["scale_y"] = { "Points" : [h_object]}
-                    x_point = openshot.Point(1, X, openshot.BEZIER)
-                    x_object = json.loads(x_point.Json())
-                    selected_clip.data["location_x"] = { "Points" : [x_object]}
-                    y_point = openshot.Point(1, Y, openshot.BEZIER)
-                    y_object = json.loads(y_point.Json())
-                    selected_clip.data["location_y"] = { "Points" : [y_object]}
-
-                    log.info('Updating clip id: %s' % selected_clip.data["id"])
-                    log.info('width: %s, height: %s' % (width, height))
-
-                    # Increment Clip Index
-                    clip_index += 1
+                if clip_index >= number_of_clips:
+                    continue

-                    # Save changes
-                    self.update_clip_data(selected_clip.data, only_basic_props=False, ignore_reader=True)
+                # Calculate X & Y
+                X = float(col) * width
+                Y = float(row) * height
+
+                # Modify clip layout settings
+                selected_clip = available_clips[clip_index]
+                selected_clip.data["gravity"] = openshot.GRAVITY_TOP_LEFT
+
+                if stretch:
+                    selected_clip.data["scale"] = openshot.SCALE_STRETCH
+                else:
+                    selected_clip.data["scale"] = openshot.SCALE_FIT
+
+                # Set scale keyframes
+                w = openshot.Point(1, width, openshot.BEZIER)
+                w_object = json.loads(w.Json())
+                selected_clip.data["scale_x"] = {"Points": [w_object]}
+                h = openshot.Point(1, height, openshot.BEZIER)
+                h_object = json.loads(h.Json())
+                selected_clip.data["scale_y"] = {"Points": [h_object]}
+                x_point = openshot.Point(1, X, openshot.BEZIER)
+                x_object = json.loads(x_point.Json())
+                selected_clip.data["location_x"] = {"Points": [x_object]}
+                y_point = openshot.Point(1, Y, openshot.BEZIER)
+                y_object = json.loads(y_point.Json())
+                selected_clip.data["location_y"] = {"Points": [y_object]}
+
+                log.info('Updating clip id: %s' % selected_clip.data["id"])
+                log.info('width: %s, height: %s' % (width, height))
+
+                # Increment Clip Index
+                clip_index += 1
+
+                # Save changes
+                self.update_clip_data(selected_clip.data, only_basic_props=False, ignore_reader=True)

     def Reverse_Transition_Triggered(self, tran_ids):
         """Callback for reversing a transition"""
@@ -2520,7 +2511,7 @@ def movePlayhead(self, position_frames):
     def centerOnPlayhead(self):
         """ Center the timeline on the current playhead position """
         # Execute JavaScript to center the timeline
-        cmd = JS_SCOPE_SELECTOR + '.centerOnPlayhead();';
+        cmd = JS_SCOPE_SELECTOR + '.centerOnPlayhead();'
         self.eval_js(cmd)

     @pyqtSlot(int)
@@ -2660,9 +2651,6 @@ def dragEnterEvent(self, event):
     # Add Clip
     def addClip(self, data, position):

-        # Get app object
-        app = get_app()
-
         # Search for matching file in project data (if any)
         file_id = data[0]
         file = File.get(id=file_id)
@@ -2671,13 +2659,6 @@ def addClip(self, data, position):
             # File not found, do nothing
             return

-        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
-            # Determine thumb path
-            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
-        else:
-            # Audio file
-            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")
-
         # Get file name
         filename = os.path.basename(file.data["path"])

@@ -2698,8 +2679,6 @@ def addClip(self, data, position):
             return # Do nothing

         # Check for optional start and end attributes
-        start_frame = 1
-        end_frame = new_clip["reader"]["duration"]
         if 'start' in file.data.keys():
             new_clip["start"] = file.data['start']
         if 'end' in file.data.keys():
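
Many of the reformatted lines above use the same keyframe idiom. As a rough standalone sketch, assuming the libopenshot Python bindings are installed and clip_data stands in for a Clip's data dict:

import json
import openshot

clip_data = {}  # stand-in for a Clip's data dict

# Build one constant keyframe point and attach it as a "Points" list,
# e.g. overriding the has_audio keyframe with -1.0 as done in Split_Audio_Triggered.
p = openshot.Point(1, -1.0, openshot.CONSTANT)
p_object = json.loads(p.Json())
clip_data["has_audio"] = {"Points": [p_object]}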
