Moving photo test

Using a Bowden tube (1 mm PTFE tube) with a 3D-printed holder.
(PTFE is very smooth, so there is very little friction.)

This is a test setup.
I removed the arms and the flute from the bottom picture using AI.
Then I printed the back picture and the top layer on sticky paper and stuck them onto sturdy, heavy paper.

I want to make a moving picture that animates our band members whenever there is sound (music).
(Guitar, Harp, Bagpipes and Flute)
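A minimal sketch of the sound-trigger side, assuming a USB microphone, the sounddevice Python library, and a hypothetical serial-connected controller that ends up pulling the Bowden wires (none of this is the final build):

import numpy as np
import serial
import sounddevice as sd

THRESHOLD = 0.05          # RMS level that counts as "music playing"; tune to the room
ser = serial.Serial("/dev/ttyUSB0", 115200)  # hypothetical controller for the Bowden wires
moving = False

def callback(indata, frames, time, status):
    global moving
    rms = float(np.sqrt(np.mean(indata ** 2)))  # loudness of this audio block
    if (rms > THRESHOLD) != moving:
        moving = not moving
        ser.write(b"M" if moving else b"S")     # M = move figures, S = stop

with sd.InputStream(channels=1, samplerate=16000, callback=callback):
    sd.sleep(60_000)      # listen for one minute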

First test

Machine Learning Waveforms

I’ve used machine learning before; this is my waveform classifier.

(Next to do: more classes.)

Class0 – Train data

Class1 – Train data

Running train script

Test data

Prediction

Code (generates an .h5 classifier):

import tensorflow as tf

from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Input
import os


# Define dataset path
DATASET_PATH = "data/"

# Load images using ImageDataGenerator
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)

train_data = datagen.flow_from_directory(
    DATASET_PATH,
    target_size=(128, 128),
    batch_size=32,
    class_mode='categorical',
    subset="training"
)

val_data = datagen.flow_from_directory(
    DATASET_PATH,
    target_size=(128, 128),
    batch_size=32,
    class_mode='categorical',
    subset="validation"
)

# Define CNN model
model = Sequential([
    Input(shape=(128, 128, 3)),
    Conv2D(32, (3,3), activation='relu'),
    MaxPooling2D(2,2),
    
    Conv2D(64, (3,3), activation='relu'),
    MaxPooling2D(2,2),
    
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(train_data.num_classes, activation='softmax')
])

# Compile model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the CNN
model.fit(train_data, validation_data=val_data, epochs=10)

# Save model
model.save("waveform_classifier.h5")
print("Model saved as waveform_classifier.h5")

Code to predict class

import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model

# Load trained model
model = load_model("waveform_classifier.h5")

# Load and preprocess image
def predict_waveform(image_path):
    img = image.load_img(image_path, target_size=(128, 128))
    img_array = image.img_to_array(img) / 255.0
    img_array = np.expand_dims(img_array, axis=0)

    # Predict class
    prediction = model.predict(img_array)
    predicted_class = np.argmax(prediction)
    
    print(f"Predicted Class: {predicted_class}")

for f in range(1,5):
    predict_waveform("testdata/" + str(f) + ".png")

Generate images from CSV

import pandas as pd
import matplotlib.pyplot as plt

def waveform_to_image(csv_file, signal_column="amplitude", save_path="waveform.png"):
    # Load CSV
    df = pd.read_csv(csv_file)

    # Extract signal (time,amplitude)
    signal = df[signal_column]

    # Plot waveform
    plt.figure(figsize=(4, 4))
    plt.ylim(0, 20)

    plt.plot(signal, color='black', linewidth=2)
    # Hide axes
    plt.axis('off')  

    # Save as an image
    plt.savefig(save_path, bbox_inches='tight', pad_inches=0)
    plt.close()
    print(f"Saved waveform image as {save_path}")

# Loop through files 1.csv to 30.csv and generate images
for i in range(1, 31):
    csv_filename = f"{i}.csv"
    png_filename = f"{i}.png"
    waveform_to_image(csv_filename, save_path=png_filename)
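To test the whole pipeline without recorded data, a synthetic CSV with the same time/amplitude columns can be generated; a minimal sketch (the two waveform shapes are only placeholders for whatever your real classes look like):

import numpy as np
import pandas as pd

def make_test_csv(path, shape="sine", n=500):
    t = np.linspace(0, 1, n)
    if shape == "sine":
        amplitude = 10 + 8 * np.sin(2 * np.pi * 5 * t)            # stays inside ylim(0, 20)
    else:
        amplitude = 10 + 8 * np.sign(np.sin(2 * np.pi * 5 * t))   # square wave
    pd.DataFrame({"time": t, "amplitude": amplitude}).to_csv(path, index=False)

make_test_csv("1.csv", "sine")
make_test_csv("2.csv", "square")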

Cheapest electronic bagpipe chanter in 6 hours.

My proof of concept to build an electronic chanter for less than 4 euros.

It uses a Lolin32 Lite and a buzzer. (No libraries needed)
(Some wires, thumbtacks and a PVC tube.)

I still have to glue the thumbtacks.

Plays accidentals (false fingering)
Vibrato
Has serial console debugging
Need other frequencies? Just edit the source.

CODE

int prevnote = 0;

// Note frequency table (Hz), equal temperament around A4 = 440 Hz
int ha = 880;   // high A
int hg = 783;   // high G
int fs = 739;   // F#
int f = 698;    // F
int e = 659;    // E
int d = 587;    // D
int cs = 554;   // C#
int c = 523;    // C
int b = 493;    // B
int as = 466;   // A#
int la = 440;   // low A
int lg = 391;   // low G
int mute = 0;   // silence

// 8 bits map to a note; luckily there are not 9 fingers :)
// 0   (00000000) = all fingers off = mute
// Bit layout: bottom hand (high nibble), top hand (low nibble)
// 01110111 = 119 = E
// 255 (11111111) = all fingers on = lg
// Below: the full lookup table, 10 entries per line,
// covering all 256 possibilities (0-255).
// Position 175 holds a custom frequency (580 Hz) to get vibrato on D,
// using the bottom-hand middle finger.
int data[] = { 
mute,hg,ha,fs,ha,hg,ha,e,ha,hg,ha,
f,ha,hg,ha,d,ha,hg,ha,fs,ha,
hg,ha,e,ha,hg,ha,f,ha,hg,ha,
c,ha,hg,ha,f,ha,hg,ha,e,ha,
hg,ha,f,ha,hg,ha,d,ha,hg,ha,
f,ha,hg,ha,e,ha,hg,ha,f,ha,
hg,ha,b,ha,hg,ha,f,ha,hg,ha,
e,ha,hg,ha,f,ha,hg,ha,d,ha,
hg,ha,f,ha,hg,ha,e,ha,hg,ha,
e,ha,hg,ha,c,ha,hg,ha,f,ha,
hg,ha,e,ha,hg,ha,f,ha,hg,ha,
d,ha,hg,ha,fs,ha,hg,ha,e,ha,
hg,ha,f,ha,hg,ha,la,ha,hg,ha,
f,ha,hg,ha,e,ha,hg,ha,f,ha,
hg,ha,d,ha,hg,ha,f,ha,hg,ha,
e,ha,hg,ha,f,ha,hg,ha,cs,ha,
hg,ha,f,ha,hg,ha,e,ha,hg,ha,
f,ha,hg,ha,580,ha,hg,ha,f,ha,
hg,ha,e,ha,hg,ha,f,ha,hg,ha,
as,ha,hg,ha,f,ha,hg,ha,e,ha,
hg,ha,f,ha,hg,ha,d,ha,hg,ha,
f,ha,hg,ha,e,ha,hg,ha,f,ha,
hg,ha,c,ha,hg,ha,fs,ha,hg,ha,
e,ha,hg,ha,f,ha,hg,ha,d,ha,
hg,ha,fs,ha,hg,ha,e,ha,hg,ha,
fs,ha,hg,ha,lg };

void setup() {
  pinMode(14, OUTPUT);   // buzzer pin

  Serial.begin(115200);
  delay(1000);
}

void loop() {
  // One touch-capable GPIO per finger hole
  const int touchPins[8] = {4, 2, 15, 13, 12, 27, 33, 32};

  int note = 0;

  // Debug reading
  //Serial.println(touchRead(touchPins[0]));
  // My readings are near zero (touched) and above 50 (released),
  // so I chose 30 as the threshold (adjust when needed).
  for (int i = 0; i < 8; i++) {
    if (touchRead(touchPins[i]) < 30) {
      bitSet(note, i);   // finger i covers its hole
    }
  }

  //Serial.println(note);
  if (note == 0 && note != prevnote) {
    noTone(14);          // all fingers off = mute
    prevnote = 0;
  }

  if (note != prevnote) {
    tone(14, data[note]);
    // debug
    //Serial.print("Note number : ");
    //Serial.println(note);
    //Serial.print("Freq : ");
    //Serial.println(data[note]);

    prevnote = note;
  }
}
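Need other frequencies? The table above is just equal temperament around A4 = 440 Hz with int() truncation, so it can be regenerated for any reference pitch; a small Python helper (my addition, not part of the chanter sketch):

# Semitone offsets from A4 = 440 Hz; truncation reproduces the table above exactly.
A4 = 440
notes = {"lg": -2, "la": 0, "as": 1, "b": 2, "c": 3, "cs": 4,
         "d": 5, "e": 7, "f": 8, "fs": 9, "hg": 10, "ha": 12}
for name, semi in notes.items():
    print(f"int {name} = {int(A4 * 2 ** (semi / 12))};")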

Making a plotter from a Laser Cutter

I’ve got a SculpFun laser cutter.
I use it a lot … as a laser cutter.

But you can also use a laser cutter as a plotter or a vinyl cutter!

Just remove the laser head and replace it with a pen or a knife!
(360° swivel blade)

First: replace the laser head and make a controllable pen holder.

My laser cutter can be controlled in real time using G-code; see for example my Etch A Sketch.
Now I just have to add a Z-axis controller for pen up/down.
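Here is a minimal sketch of that real-time control, assuming the cutter runs GRBL and shows up as /dev/ttyUSB0 at 115200 baud (device path, baud rate and the example moves are assumptions):

import time
import serial

GCODE = ["G21", "G90", "G0 X10 Y10", "G1 X50 Y10 F1000"]   # mm, absolute, two example moves

ser = serial.Serial("/dev/ttyUSB0", 115200)
ser.write(b"\r\n\r\n")        # wake up GRBL
time.sleep(2)
ser.reset_input_buffer()

for line in GCODE:
    ser.write((line + "\n").encode())
    # GRBL answers every line with "ok" (or "error:...")
    print(line, "->", ser.readline().decode().strip())

ser.close()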

While I’m not afraid to cut things by hand, like our front door decoration, I prefer a more precise and repeatable way. I’ve cut lots of Nae Bother logos, like the one on my laptop. (Those were made using a computer cutter.)

Test code (no G-code yet):

#include <Servo.h>

const int buttonPin = 16;
int lastButtonState = 0;

Servo myservo;

void setup() {
  pinMode(buttonPin, INPUT);
  myservo.attach(2);   // servo signal on pin 2
  myservo.write(0);    // start with the pen up
  Serial.begin(115200);
}

void loop() {
  int reading = digitalRead(buttonPin);

  if (reading == 1 && lastButtonState == 0) {
    myservo.write(0);     // pen up
    Serial.println("UP");
    lastButtonState = 1;
  }
  if (reading == 0 && lastButtonState == 1) {
    myservo.write(160);   // pen down
    Serial.println("DOWN");
    lastButtonState = 0;
  }
}

Better thumb thingy for short movie clips

(Improvement of a previously posted script.)

Place the bash script below in a subdirectory of your media directory (it scans the parent directory).

Animated GIFs are generated from the video files.
The frame-grab rate depends on the clip length: videos longer than 10 minutes get a lower rate.

#!/bin/bash
#set -x
mkdir -p tmp prev
: > list
: > index.html
find ../ -type f -print | egrep -i "mp4$|wmv$|avi$|mpg$|mpeg$|flv$|mov$|divx$"  > list

cat list | while read -r movie; do 
	rm -f tmp/*
	newname=$( echo "$movie" | tr -d ' /.[]{}()' ) 
	if [ ! -f prev/${newname}.gif ] ; then

		echo "Filename : $movie"

		msecs=$( mediainfo --Inform="Video;%Duration%" "$movie" )   # duration in milliseconds
		minutes=$(( msecs / 60000 ))
		echo "Minutes : $minutes"
		if [ $minutes -gt 10 ] ; then 
			rate=0.032
		else
			rate=0.128
		fi
echo "ffmpeg -hide_banner -loglevel error -i $movie -r $rate  -vf scale=640:-1 tmp/output_%04d.png"
		ffmpeg -hide_banner -loglevel error -i "$movie" -r $rate  -vf scale=640:-1 tmp/output_%04d.png < /dev/null
		# remove first (most of the time just black or logo)
		rm tmp/output_0001.png
		echo -n "Frames : "
		ls tmp/out* | wc -l
		convert -delay 50 -loop 0  tmp/output*.png prev/${newname}.gif
	else
		echo "$movie exists ... skipping"
	fi
	echo "<h1>${movie}</h1><br>" >> index.html
	echo "<img src=\"prev/${newname}.gif\"><br>" >> index.html
done
exit 0

Run it and you get something like below (the output was still being generated as I made this post).

First tests with 360 Lidar

In the past, I’ve played with a standard lidar device.

Now it is time to check out a 360 version.

This one is very small (40mm x 40mm x 35mm)

The provided examples didn’t work. (People on the GitHub issue tracker reported the same error.)

I changed the Python script so it also works with this YDLidar T-mini Plus.

Next to-do: put it on my robot car.

Code:

import ydlidar
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation

RMAX = 32.0

fig = plt.figure()
lidar_polar = plt.subplot(polar=True)
lidar_polar.autoscale_view(True,True,True)
lidar_polar.set_rmax(RMAX)
lidar_polar.grid(True)
# Use the last detected lidar port; fall back to /dev/ttyUSB0
ports = ydlidar.lidarPortList()
port = "/dev/ttyUSB0"
for key, value in ports.items():
    port = value

laser = ydlidar.CYdLidar()

laser.setlidaropt(ydlidar.LidarPropSerialPort, port)
laser.setlidaropt(ydlidar.LidarPropSerialBaudrate, 230400)
laser.setlidaropt(ydlidar.LidarPropLidarType, ydlidar.TYPE_TRIANGLE)
laser.setlidaropt(ydlidar.LidarPropDeviceType, ydlidar.YDLIDAR_TYPE_SERIAL)
laser.setlidaropt(ydlidar.LidarPropScanFrequency, 10.0)
laser.setlidaropt(ydlidar.LidarPropSampleRate, 4)
laser.setlidaropt(ydlidar.LidarPropSingleChannel, False)
laser.setlidaropt(ydlidar.LidarPropMaxAngle, 180.0)
laser.setlidaropt(ydlidar.LidarPropMinAngle, -180.0)
laser.setlidaropt(ydlidar.LidarPropMaxRange, 16.0)
laser.setlidaropt(ydlidar.LidarPropMinRange, 0.02)
laser.setlidaropt(ydlidar.LidarPropIntenstiy, True)  # sic: the SDK misspells "Intensity"

scan = ydlidar.LaserScan()

def animate(num):
    if laser.doProcessSimple(scan):
        angle = []
        ran = []
        intensity = []
        for point in scan.points:
            angle.append(point.angle)
            ran.append(point.range)
            intensity.append(point.intensity)
        lidar_polar.clear()
        lidar_polar.scatter(angle, ran, c=intensity, cmap='hsv', alpha=0.95, marker=".")

ret = laser.initialize()
if ret:
    ret = laser.turnOn()
    if ret:
        ani = animation.FuncAnimation(fig, animate, interval=50)
        plt.show()
    laser.turnOff()
laser.disconnecting()
plt.close()
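For the robot-car to-do, the same scan object already carries everything needed to find the nearest obstacle; a minimal sketch (my addition, reusing the point fields from the script above):

import math

def nearest_obstacle(scan, min_range=0.05):
    # Return (distance in m, angle in degrees) of the closest valid point,
    # ignoring zero/near-zero ranges that mean "no return".
    valid = [p for p in scan.points if p.range > min_range]
    if not valid:
        return None
    closest = min(valid, key=lambda p: p.range)
    return closest.range, math.degrees(closest.angle)

# Example: call inside animate() after doProcessSimple(scan) succeeds:
#   print(nearest_obstacle(scan))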

Mattermost notification thingy

3D-printed a little light case for a Wemos and a piece of WS2812 LED strip I had lying around.

Schematic:
NOTE: the resistor is 100-500 ohm (I forgot the exact value, just try).
You can only use this trick for a few LEDs (I used 4); otherwise you’d better use the "sacrifice an LED as a level shifter" trick.
(Wemos logic is 3.3 V and the LED strip runs at 5 V.)

I flashed ESPHome onto the Wemos using the flasher in Home Assistant.

Code:

esphome:
  name: matternotification
  friendly_name: matternotification

esp8266:
  board: d1_mini

# Enable logging
logger:

# Enable Home Assistant API
api:
  encryption:
    key: "ogFxZUXerNxxxxxxxxxxxxxxxxxWaWyJVxCM="

ota:
  - platform: esphome
    password: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"

wifi:
  ssid: !secret wifi_ssid
  password: !secret wifi_password

  # Enable fallback hotspot (captive portal) in case wifi connection fails
  ap:
    ssid: "Matternotification"
    password: "rxxxxxxxxxxxxxxx"

captive_portal:

light:
  - platform: neopixelbus
    type: GRB
    variant: WS2812
    pin: D4
    num_leds: 4
    name: "NeoPixelMattermost"        

To get the message status and control the HA entity, I made a bash script.

The first curl command gets a token from Mattermost using the API.
The second curl command gets the unread-message state from Mattermost.

The bottom two curl commands turn a light entity on or off in your Home Assistant server using its API.

#!/bin/bash
#set -x
# change : mattermost username and password (and server)
# change : mattermost userid and teamid
# change : home assistant long time token (and HA server) 
# change : light entity
#
while true; do
# Get token using login
#token=$(curl -s -i -X POST -H 'Content-Type: application/json' -d '{"login_id":"username","password":"password"}' https://mattermostinstance.com/api/v4/users/login | grep ^Token | awk '{ print $2 }' | tr -d '\r' )
#using a MM auth token (see below)
token=xxxxxxxxxxxxxxxxxxxx

# Get messages 
# Gives you something like
# {"team_id":"j3fd7gksxxxxxxxxxxxxxjr","channel_id":"rroxxxxxxxxxxxxxxtueer","msg_count":0,"mention_count":0,"mention_count_root":0,"urgent_mention_count":0,"msg_count_root":0}
# We need to count ":0" 

messages=$(curl -s -i -H "Authorization: Bearer ${token}" https://mattermostinstance.com/api/v4/users/ou5nz5--USER-UUID--rbuw4xy/channels/rropejn--TEAM-ID--tueer/unread | grep channel | grep -o ":0" | wc -l)

# If ":0" occurs 5 times, there are no unread messages
if [ "$messages" -eq 5 ] ; then
# Turn off
curl -s -X POST -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cC--HOME-ASSISTANT-LONG-TIME-TOKEN-CBusTgTUueWpPNdH5WAWOE" \
       -H "Content-Type: application/json" \
       -d '{"entity_id": "light.matternotification_neopixelmattermost_2"}' \
       http://192.168.1.2:8123/api/services/light/turn_off > /dev/null
else
# Turn on
curl -s -X POST -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cC--HOME-ASSISTANT-LONG-TIME-TOKEn--CBusTgTUueWpPNdH5WAWOE" \
       -H "Content-Type: application/json" \
       -d '{"entity_id": "light.matternotification_neopixelmattermost_2"}' \
       http://192.168.1.2:8123/api/services/light/turn_on > /dev/null
fi
sleep 5
done

Get a long-lived access token from HA:

Profile > Security > Create Token

Create a token in Mattermost:

Profile > Security > Personal Access Tokens (a system admin has to enable these)

"If something is worth doing, it's worth overdoing."