Commit b9f4038c authored by Vincent Wall

six-class svc model, increased sensing frequency, semilogy spectrum

parent 52e775a6
@@ -35,9 +35,9 @@ from glob import glob
 # ==================
 BASE_DIR = "."
 SOUND_NAME = "sweep" # sound to use
-CLASS_LABELS = ["tip", "middle", "base", "back", "none"] # classes to train
+CLASS_LABELS = ["tip", "middle", "base", "back", "left", "right", "none"] # classes to train
 SAMPLES_PER_CLASS = 20
-MODEL_NAME = "lndw2022_sweep_1s"
+MODEL_NAME = "berlinsummit_sweep_1s"
 SHUFFLE_RECORDING_ORDER = False
 APPEND_TO_EXISTING_FILES = True
 # ==================
@@ -83,9 +83,10 @@ def setup_experiment():
     current_idx = 0
     if APPEND_TO_EXISTING_FILES:
-        max_id = max([int(x.split("/")[-1].split("_")[0]) for x in glob(DATA_DIR+"/*.wav")])
-        label_list = [""]*max_id + label_list
-        current_idx = max_id
+        if len(glob(DATA_DIR+"/*.wav")) > 0:
+            max_id = max([int(x.split("/")[-1].split("_")[0]) for x in glob(DATA_DIR+"/*.wav")])
+            label_list = [""]*max_id + label_list
+            current_idx = max_id
 def setup_jack(sound_name):
......
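For context on the `APPEND_TO_EXISTING_FILES` change above: the added guard avoids calling `max()` on an empty list when the data directory does not contain any recordings yet. A minimal standalone sketch of the same indexing idea, assuming filenames of the form `<index>_<label>.wav`; the helper name and directory layout are illustrative, not part of the repository:

```python
import os
from glob import glob


def next_start_index(data_dir):
    """Highest index already used in '<index>_<label>.wav' files, or 0 if none exist."""
    wav_files = glob(os.path.join(data_dir, "*.wav"))
    if not wav_files:
        return 0  # empty directory: start recording at index 0
    return max(int(os.path.basename(path).split("_")[0]) for path in wav_files)


# e.g. with data/0_tip.wav ... data/19_none.wav present this returns 19,
# so label_list can be padded and current_idx resumed from there.
```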
@@ -25,6 +25,9 @@ import pickle
 from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
 from sklearn.model_selection import train_test_split
 from matplotlib import pyplot
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.svm import SVC
 from A_record import MODEL_NAME
@@ -32,7 +35,7 @@ from A_record import MODEL_NAME
 # USER SETTINGS
 # ==================
 BASE_DIR = "."
-SENSORMODEL_FILENAME = "sensor_model_lndw2022_AAS016.pkl"
+SENSORMODEL_FILENAME = "sensor_model_berlinsummit_AAS016_svc.pkl"
 TEST_SIZE = 0 # percentage of samples left out of training and used for reporting test score
 SHOW_PLOTS = True
 # ==================
@@ -138,7 +141,7 @@ def main():
     sounds, labels = load_sounds(DATA_DIR)
     # spectra = [sound_to_spectrum(sound) for sound in sounds]
     spectra = [sound_to_spectrum_stft(sound) for sound in sounds]
-    classes = list(set(labels))
+    classes = sorted(list(set(labels)))
     if SHOW_PLOTS:
         plot_spectra(spectra, labels)
@@ -148,8 +151,15 @@ def main():
     else:
         X_train, y_train = (spectra, labels)
-    clf = KNeighborsClassifier() # using default KNN classifier
+    # clf = KNeighborsClassifier() # using default KNN classifier
+    clf = Pipeline([
+        ("standardscaler", StandardScaler()),
+        ("classifier", SVC(kernel="linear", C=0.01,
+                           probability=True)),
+    ])
+    clf.name = "svc_custom" # name is used for saving
     clf.fit(X_train, y_train)
     train_score = clf.score(X_train, y_train)
     print("Fitted sensor model to data!")
     print("Training score: {:.2f}".format(train_score))
......
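The classifier swap above replaces the default k-NN with a standardised linear SVC wrapped in a `Pipeline`. A minimal sketch of training and pickling such a pipeline on placeholder data; the feature matrix, labels, and output filename are made up here and stand in for the repository's `sound_to_spectrum_stft` features and `SENSORMODEL_FILENAME`:

```python
import pickle

import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# Placeholder data: one row per recording (e.g. an STFT spectrum), one label per row.
rng = np.random.default_rng(0)
X = rng.random((35, 128))
y = np.repeat(["tip", "middle", "base", "back", "left", "right", "none"], 5)

clf = Pipeline([
    ("standardscaler", StandardScaler()),          # zero mean / unit variance per feature
    ("classifier", SVC(kernel="linear", C=0.01,    # small C = strong regularisation
                       probability=True)),         # enables predict_proba
])
clf.fit(X, y)
print("Training score: {:.2f}".format(clf.score(X, y)))

# Persist the fitted pipeline; a live script can unpickle it and call predict().
with open("sensor_model_example.pkl", "wb") as f:
    pickle.dump(clf, f)
```

A linear kernel with a small `C` keeps the model heavily regularised, which suits the small per-class sample counts used here (`SAMPLES_PER_CLASS = 20`); the `clf.name` attribute set in the actual change is, per its comment, just a tag used when saving the model.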
@@ -49,7 +49,17 @@ label_renamer_lndw2022 = {
     "back": "Rückseite",
     "none": "Ohne"
 }
-label_renamer = label_renamer_lndw2022
+label_renamer_berlinsummit = {
+    "tip": "Tip",
+    "middle": "Middle",
+    "base": "Base",
+    "back": "Backside",
+    "none": "No contact",
+    "left": "Left side",
+    "right": "Right side"
+}
+label_renamer = label_renamer_berlinsummit
 CHANNELS = 1
 SR = 48000
@@ -72,6 +82,7 @@ class LiveAcousticSensor(object):
         self.setup_jack()
         self.setup_model()
         self.setup_window()
+        self.pause_time = 0.01
     def setup_jack(self):
         self.J = JackSignal("JS")
@@ -115,11 +126,13 @@ class LiveAcousticSensor(object):
         ax2.set_title("Amplitude spectrum", size=20)
         ax2.set_xlabel("Frequency [Hz]")
         self.wavelines, = ax1.plot(self.Ains[0])
-        self.spectrumlines, = ax2.plot(sound_to_spectrum_stft(self.Ains[0]))
-        ax2.set_ylim([0, 250])
+        self.spectrumlines, = ax2.plot(sound_to_spectrum_stft(self.Ains[0])+1e-16)
+        ax2.set_ylim([1e-1, 1000])
+        ax2.set_yscale("log")
         ax3a = f.add_subplot(2, 2, 3)
-        ax3a.text(0.0, 0.8, "Gemessener Kontakt:", dict(size=40))
+        ax3a.text(0.0, 0.8, "Measured contact:", dict(size=40))
         self.predictiontext = ax3a.text(0.25, 0.25, "", dict(size=70))
         ax3a.set_xticklabels([])
         ax3a.set_yticklabels([])
@@ -169,7 +182,7 @@ class LiveAcousticSensor(object):
                 self.J.process()
                 self.J.wait()
                 self.predict()
-                plt.pause(1)
+                plt.pause(self.pause_time)
             else:
                 key = input("Press <Enter> to sense! ('q' to abort)")
                 while key == '':
@@ -189,6 +202,12 @@ def on_key(event):
         toggle_pause(event)
     elif event.key == "q":
         sys.exit()
+    elif event.key == "+":
+        predictor.pause_time = min(predictor.pause_time + 0.2, 2)
+        print(f"{predictor.pause_time=}")
+    elif event.key == "-":
+        predictor.pause_time = max(predictor.pause_time - 0.2, 0)
+        print(f"{predictor.pause_time=}")
 def main():
......
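Two of the changes above affect the live display: the spectrum axis switches to a log scale (with a small `+1e-16` offset so zero-valued bins do not break the log plot), and the fixed `plt.pause(1)` becomes an adjustable `pause_time` bound to the '+'/'-' keys. A self-contained sketch of both ideas on synthetic data; the figure layout, signal, and the 0.01 s floor are assumptions for this sketch, not taken from the repository:

```python
import numpy as np
import matplotlib.pyplot as plt

pause_time = 0.01  # seconds between display updates, adjustable at runtime


def on_key(event):
    """Change the update interval with '+' / '-', clamped to a sensible range."""
    global pause_time
    if event.key == "+":
        pause_time = min(pause_time + 0.2, 2)
    elif event.key == "-":
        pause_time = max(pause_time - 0.2, 0.01)  # small floor keeps plt.pause() positive
    print(f"{pause_time=}")


fig, ax = plt.subplots()
fig.canvas.mpl_connect("key_press_event", on_key)

spectrum = np.abs(np.fft.rfft(np.random.randn(4800))) + 1e-16  # offset avoids log(0)
line, = ax.plot(spectrum)
ax.set_yscale("log")            # semilog view makes quiet frequency bins visible
ax.set_ylim([1e-1, 1000])
ax.set_xlabel("Frequency bin")
ax.set_title("Amplitude spectrum")

for _ in range(200):            # stand-in for the JACK record/predict loop
    line.set_ydata(np.abs(np.fft.rfft(np.random.randn(4800))) + 1e-16)
    plt.pause(pause_time)
```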
Image files changed in this commit:

  img/back.jpg    modified (544 KiB → 353 KiB)
  img/base.jpg    modified (475 KiB → 343 KiB)
  img/left.jpg    added (698 KiB)
  img/middle.jpg  modified (493 KiB → 372 KiB)
  img/none.jpg    modified (693 KiB → 494 KiB)
  img/right.jpg   added (873 KiB)
  img/tip.jpg     modified (643 KiB → 423 KiB)
#!/usr/bin/env bash
# Configure the USB audio interface via ALSA.
# Playback: set Master to 80% and unmute it.
amixer -D hw:USB sset 'Master',0 80% unmute
# 'Line' input 0: zero and mute the playback path, enable capture at 70%.
amixer -D hw:USB sset 'Line',0 0% mute cap capture 70%
# 'Line' input 1: zero and mute.
amixer -D hw:USB sset 'Line',1 0% mute