Dear friend:

You are thinking that, in order to motivate yourself to actually study meaningfully at an accelerated pace, you should try recording yourself going through the motions of studying. You simply cannot resist coding up some Python scripts here and there...

You aim to accomplish this by:

  1. installing a program (for example, AutoScreenshot) that automatically captures your computer's entire screen once a minute;
  2. tweaking the program's settings so that each screenshot is saved as a PNG with maximum lossless compression, using the filename format photo_%Y-%M-%D_%H-%N-%S so that the files sort chronologically, ISO 8601 style (a quick sanity check of this is sketched right after the list below);
  3. stitching these screenshots together into a timelapse video using video editing software like DaVinci Resolve or kdenlive;
  4. adding background music to the timelapse video, namely this soundtrack from Sonic Generations, in reference to this video of Takanashi Kiara's determination;
  5. ???
  6. Profit!
  • You hope that this will motivate you to study more effectively.
  • You hope that this helps you maintain focus and momentum throughout the day.
  • You hope that this helps you maintain a sense of time and progress.
  • You hope that this helps you maintain a sense of purpose and direction.
  • You hope that this helps you maintain a sense of accomplishment and fulfillment.
  • You hope that this helps you maintain a sense of self-improvement and growth.
  • You hope that this helps you maintain a sense of discipline and consistency.
  • You hope that this helps you maintain a sense of motivation and inspiration.
  • You hope that this helps you maintain a sense of joy and happiness.
  • You hope that this helps you maintain a sense of gratitude and appreciation.
  • You hope that this helps you maintain a sense of love and connection.
  • You hope that this helps you maintain a sense of courage and resilience.
  • You hope that this helps you maintain a sense of patience and perseverance.
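
A quick aside on step 2: because the timestamp fields are zero-padded and ordered from year down to second, plain lexicographic sorting of the filenames is already chronological, so no date parsing is needed just to put the frames in order. A minimal sketch, with made-up filenames:

# Hypothetical filenames, just to illustrate the ordering
names = [
    "photo_2024-03-05_14-30-00.png",
    "photo_2024-03-05_09-05-59.png",
    "photo_2024-03-04_23-59-00.png",
]

# Zero-padded, big-endian timestamps sort correctly as plain strings.
print(sorted(names))
# ['photo_2024-03-04_23-59-00.png',
#  'photo_2024-03-05_09-05-59.png',
#  'photo_2024-03-05_14-30-00.png']

The script below still parses the timestamps and sorts on those, which gives the same order but is more explicit.
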
Let's see how this goes.
#!/usr/bin/env python3

"""
Timelapse Video Creation Script

This script creates a short timelapse video from a series of screenshots taken throughout the day.
It processes image files, creates a video, adds background music, and optimizes the final output.

Requirements:
    - Python 3.x
    - External libraries: numpy, opencv-python, pillow
    - FFmpeg installed and available in the system PATH

Usage:
    1. Install required libraries: `pip3 install numpy opencv-python pillow`
    2. Ensure FFmpeg is installed and available in your system PATH
    3. Set the appropriate paths in the script
    4. Run the script: python3 script_name.py

The script will generate a timelapse video with the current date as the filename.
"""
import os
import re
import subprocess
from datetime import datetime

import cv2
import numpy as np
from PIL import Image

# Directory containing the image files
image_dir = r"path/to/images"

# Output video file name
output_file = f"path/to/output_folder/{datetime.now().date().strftime('%Y-%m-%d')}.mp4"

# Set the number of frames per image
frames_per_image = 4
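# For a sense of scale: a 16-hour day captured once a minute is 960 screenshots,
# so 960 * 4 frames played back at 30 fps comes out to roughly 128 seconds of timelapse.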

# Get the current date
current_date = datetime.now().date()

# Precompile the regular expression pattern
pattern = re.compile(r"photo_(\d{4})-(\d{2})-(\d{2})_(\d{2})-(\d{2})-(\d{2})")

# Find all image files in the current directory
image_files = [
    f for f in os.listdir(image_dir) if f.endswith((".png", ".jpg", ".jpeg"))
]

def process_image(image_file):
    """Load one screenshot and return (RGB array, timestamp), or None if it is not from today."""
    match = pattern.search(image_file)
    if match:
        year, month, day, hour, minute, second = [int(g) for g in match.groups()]
        timestamp = datetime(year, month, day, hour, minute, second)
        if timestamp.date() == current_date:
            image_path = os.path.join(image_dir, image_file)
            # Force RGB so PNGs with an alpha channel don't come through as 4-channel arrays
            image = np.array(Image.open(image_path).convert("RGB"))
            return (image, timestamp)
    return None

def main():
    # Parallel processing of images
    from concurrent.futures import ProcessPoolExecutor

    with ProcessPoolExecutor() as executor:
        images_and_timestamps = [
            result for result in executor.map(process_image, image_files) if result
        ]

    # Sort the images based on their timestamp
    images_and_timestamps.sort(key=lambda x: x[1])

    # Bail out early if nothing matched (e.g. no screenshots from today)
    if not images_and_timestamps:
        raise SystemExit("No screenshots to process; nothing to do.")

    # Create a video writer object sized to match the first screenshot
    height, width = images_and_timestamps[0][0].shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    fps = 30
    video_writer = cv2.VideoWriter(output_file, fourcc, fps, (width, height))

    # Write the frames to the video, converting PIL's RGB layout to the BGR layout OpenCV expects
    for image, _ in images_and_timestamps:
        frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        for _ in range(frames_per_image):
            video_writer.write(frame)

    # Release the video writer
    video_writer.release()

    # ------------------------------------------------------------------------------

    # Path to the ffmpeg binary
    # ffmpeg_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "ffmpeg.exe")

    # Paths to the video and audio files
    video_file = output_file
    audio_file = r"path/to/bgm.mp3"

    # Get the duration of the video and audio files by parsing the
    # "Duration: HH:MM:SS.ss" line that ffmpeg prints to stderr
    # (a cleaner ffprobe-based variant is sketched after the script)
    def media_duration(path):
        probe = subprocess.run(
            ["ffmpeg", "-i", path, "-f", "null", "-"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            check=True,
            universal_newlines=True,
        )
        hours, minutes, seconds = re.search(
            r"Duration: (\d+):(\d+):(\d+\.\d+)", probe.stderr
        ).groups()
        return int(hours) * 3600 + int(minutes) * 60 + float(seconds)

    video_duration = media_duration(video_file)
    audio_duration = media_duration(audio_file)

    # Construct the ffmpeg command: copy the video stream as-is, encode the audio
    # as AAC, and stop at whichever input ends first
    compiled_file = f"{os.path.splitext(output_file)[0]}_compiled.mp4"
    cmd = [
        "ffmpeg",
        "-i", video_file,
        "-i", audio_file,
        "-c:v", "copy",
        "-c:a", "aac",
        "-map", "0:v:0",
        "-map", "1:a:0",
        "-shortest",
        "-y",
        compiled_file,
    ]

    # If the video is longer than the audio, loop the audio input so it covers the
    # whole video; -shortest then trims it back to the video's length
    if video_duration > audio_duration:
        cmd[3:3] = ["-stream_loop", "-1"]

    # Run the ffmpeg command
    subprocess.run(cmd, check=True)

    os.remove(output_file)

    # Command for ffmpeg to re-encode the muxed video with libx265 to shrink the file size
    cmd = [
        "ffmpeg",
        "-i", compiled_file,
        "-c:v", "libx265",
        "-crf", "28",
        "-c:a", "copy",
        output_file,
    ]

    try:
        # Run the ffmpeg command
        subprocess.run(cmd, check=True)
        print(f"Video reencoded successfully to {output_file}")
        # Only delete the intermediate file once the re-encode has succeeded
        os.remove(compiled_file)
    except subprocess.CalledProcessError as e:
        print(f"Error occurred while reencoding video: {e.returncode}")

if __name__ == '__main__':
    main()
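
One possible refinement, not used above: instead of scraping the duration out of ffmpeg's stderr, ffprobe (which ships alongside ffmpeg) can report it directly as a single number. A minimal sketch, assuming ffprobe is on the system PATH:

import subprocess

def probe_duration(path):
    """Return a media file's duration in seconds, as reported by ffprobe."""
    out = subprocess.run(
        [
            "ffprobe",
            "-v", "error",
            "-show_entries", "format=duration",
            "-of", "default=noprint_wrappers=1:nokey=1",
            path,
        ],
        stdout=subprocess.PIPE,
        check=True,
        universal_newlines=True,
    ).stdout
    return float(out.strip())

This avoids the regex parsing entirely and fails loudly if the file cannot be read.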

Sincerely,

Twice B. Hathaway