打造专属BGM,Python 深度学习教你
音乐+文字,组合食用,效果更佳。
“那些听不到音乐的人,以为跳舞的人疯了。” 尼采这句话好有趣,也告诉我们音乐对于日常生活的不可或缺之处。但是对于一般人来说,想要精通各种乐器难度较高。故今天我们来实践一个普通人可以制作的音乐项目,用深度学习的方法让计算机自动生成自己需要的音乐。完整代码见文末。
# NOTE(review): body of a note-extraction method whose `def` line is outside
# this chunk; it reads self.songs / self.model_name from the enclosing class.
notes = []
for file in self.songs:
    print("Parsing %s" % file)
    try:
        midi = converter.parse(file)
    except IndexError as e:
        print(f"Could not parse {file}")
        print(e)
        continue

    notes_to_parse = None
    try:
        # Multi-instrument file: take the first instrument part.
        s2 = instrument.partitionByInstrument(midi)
        notes_to_parse = s2.parts[0].recurse()
    # Was a bare `except:`, which also swallows KeyboardInterrupt/SystemExit;
    # narrowed to Exception while keeping the intended fallback.
    except Exception:
        notes_to_parse = midi.flat.notes

    prev_offset = 0.0
    for element in notes_to_parse:
        if isinstance(element, (note.Note, chord.Chord)):
            duration = element.duration.quarterLength
            if isinstance(element, note.Note):
                name = element.pitch
            else:
                # Chord encoded as dot-joined normal-order pitch classes.
                name = ".".join(str(n) for n in element.normalOrder)
            notes.append(f"{name}${duration}")
            # Encode silence between consecutive events as "NULL" rest tokens.
            rest_notes = int((element.offset - prev_offset) / TIMESTEP - 1)
            for _ in range(0, rest_notes):
                notes.append("NULL")
            prev_offset = element.offset

# Persist the token list for reuse (assumes the "notes/" directory exists).
with open("notes/" + self.model_name, "wb") as filepath:
    pickle.dump(notes, filepath)
def prepare_sequences(self, notes, n_vocab):
    """Turn the token list into integer training sequences.

    Args:
        notes: list of string tokens ("pitch$duration" or "NULL").
        n_vocab: vocabulary size used to normalise the inputs.

    Returns:
        (network_input, network_output): inputs shaped
        (n_patterns, SEQUENCE_LEN, 1), scaled into [0, 1];
        outputs one-hot encoded.
    """
    # Sorted de-duplicated tokens give a stable token -> int mapping.
    pitchnames = sorted(set(notes))
    # Reserve 0 for the "NULL" rest marker; real tokens start at 1.
    note_to_int = {tok: idx + 1 for idx, tok in enumerate(pitchnames)}
    note_to_int["NULL"] = 0

    network_input = []
    network_output = []
    # Sliding window: SEQUENCE_LEN tokens predict the following token.
    for i in range(len(notes) - SEQUENCE_LEN):
        sequence_in = notes[i : i + SEQUENCE_LEN]
        sequence_out = notes[i + SEQUENCE_LEN]
        network_input.append([note_to_int[tok] for tok in sequence_in])
        network_output.append(note_to_int[sequence_out])

    n_patterns = len(network_input)
    network_input = numpy.reshape(network_input, (n_patterns, SEQUENCE_LEN, 1))
    # Scale inputs into [0, 1] for the LSTM.
    network_input = network_input / float(n_vocab)
    # (Removed a stray debug print of the entire output list.)
    network_output = np_utils.to_categorical(network_output)
    return (network_input, network_output)
def train(self, network_input, network_output):
    """Fit the model, checkpointing the lowest-loss weights to disk."""
    weights_path = (
        self.model_name + "-weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
    )
    # Keep only improving checkpoints, judged by minimum training loss.
    best_only = ModelCheckpoint(
        weights_path, monitor="loss", verbose=0, save_best_only=True, mode="min"
    )
    self.model.fit(
        network_input,
        network_output,
        epochs=self.epochs,
        batch_size=64,
        callbacks=[best_only],
    )
def create_network(network_input, n_vocab):
    """Create the network: two stacked bidirectional LSTMs with a softmax head.

    (In the original, this string sat after the print calls, making it a
    no-op expression statement rather than a docstring — moved to the top.)
    """
    print("Input shape ", network_input.shape)
    print("Output shape ", n_vocab)
    model = Sequential()
    # input_shape = (timesteps, features) = (SEQUENCE_LEN, 1).
    model.add(
        Bidirectional(
            LSTM(512, return_sequences=True),
            input_shape=(network_input.shape[1], network_input.shape[2]),
        )
    )
    model.add(Dropout(0.3))
    model.add(Bidirectional(LSTM(512)))
    # Softmax over the vocabulary for next-token prediction.
    model.add(Dense(n_vocab))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    return model
def get_start():
    """Pick a random training sequence as the generation seed.

    Returns the seed pattern and an empty list that will accumulate
    the predicted tokens.
    """
    seed_index = numpy.random.randint(0, len(network_input) - 1)
    return network_input[seed_index], []
# NOTE(review): body of an enclosing generation method whose `def` is outside
# this chunk; model, n_vocab, int_to_note, network_input and SEQUENCE_LEN come
# from that enclosing scope. Each section below generates 4*SEQUENCE_LEN tokens
# by greedy argmax decoding over a sliding window.
# generate verse 1
verse1_pattern, verse1_prediction_output = get_start()
for note_index in range(4 * SEQUENCE_LEN):
# Shape the rolling window to (batch=1, timesteps, features=1).
prediction_input = numpy.reshape(
verse1_pattern, (1, len(verse1_pattern), 1)
)
prediction_input = prediction_input / float(n_vocab)
prediction = model.predict(prediction_input, verbose=0)
# Greedy decoding: always take the most probable next token.
index = numpy.argmax(prediction)
print("index", index)
result = int_to_note[index]
verse1_prediction_output.append(result)
# Slide the window: append the prediction, drop the oldest token.
verse1_pattern.append(index)
verse1_pattern = verse1_pattern[1 : len(verse1_pattern)]
# generate verse 2
# NOTE(review): verse2_pattern aliases verse1_pattern (no copy). Harmless here
# because verse1_pattern is no longer used, but an explicit copy would be safer.
verse2_pattern = verse1_pattern
verse2_prediction_output = []
for note_index in range(4 * SEQUENCE_LEN):
prediction_input = numpy.reshape(
verse2_pattern, (1, len(verse2_pattern), 1)
)
prediction_input = prediction_input / float(n_vocab)
prediction = model.predict(prediction_input, verbose=0)
index = numpy.argmax(prediction)
print("index", index)
result = int_to_note[index]
verse2_prediction_output.append(result)
verse2_pattern.append(index)
verse2_pattern = verse2_pattern[1 : len(verse2_pattern)]
# generate chorus
chorus_pattern, chorus_prediction_output = get_start()
for note_index in range(4 * SEQUENCE_LEN):
prediction_input = numpy.reshape(
chorus_pattern, (1, len(chorus_pattern), 1)
)
prediction_input = prediction_input / float(n_vocab)
prediction = model.predict(prediction_input, verbose=0)
index = numpy.argmax(prediction)
print("index", index)
result = int_to_note[index]
chorus_prediction_output.append(result)
chorus_pattern.append(index)
chorus_pattern = chorus_pattern[1 : len(chorus_pattern)]
# generate bridge
bridge_pattern, bridge_prediction_output = get_start()
for note_index in range(4 * SEQUENCE_LEN):
prediction_input = numpy.reshape(
bridge_pattern, (1, len(bridge_pattern), 1)
)
prediction_input = prediction_input / float(n_vocab)
prediction = model.predict(prediction_input, verbose=0)
index = numpy.argmax(prediction)
print("index", index)
result = int_to_note[index]
bridge_prediction_output.append(result)
bridge_pattern.append(index)
bridge_pattern = bridge_pattern[1 : len(bridge_pattern)]
# Assemble the song as verse1-chorus-verse2-chorus-bridge-chorus.
return (
verse1_prediction_output
+ chorus_prediction_output
+ verse2_prediction_output
+ chorus_prediction_output
+ bridge_prediction_output
+ chorus_prediction_output
)
# NOTE(review): body of an enclosing MIDI-writing method whose `def` is outside
# this chunk; `offset`, `output_notes`, `prediction_output` and `self.weights`
# come from that enclosing scope.
for pattern in prediction_output:
    # Tokens are "pitch$duration" (or "NULL" for a rest); split off duration.
    if "$" in pattern:
        pattern, dur = pattern.split("$")
        if "/" in dur:
            # Duration stored as a fraction, e.g. "1/3".
            a, b = dur.split("/")
            dur = float(a) / float(b)
        else:
            dur = float(dur)
    # pattern is a chord: dot-joined pitch classes, e.g. "0.4.7"
    if ("." in pattern) or pattern.isdigit():
        notes_in_chord = pattern.split(".")
        notes = []
        for current_note in notes_in_chord:
            new_note = note.Note(int(current_note))
            new_note.storedInstrument = instrument.Piano()
            notes.append(new_note)
        new_chord = chord.Chord(notes)
        new_chord.offset = offset
        new_chord.duration = duration.Duration(dur)
        output_notes.append(new_chord)
    # pattern is a rest marker
    # Was `pattern is "NULL"`: identity comparison against a str literal is
    # unreliable (depends on interning) and a SyntaxWarning — use equality.
    elif pattern == "NULL":
        offset += TIMESTEP
    # pattern is a single note
    else:
        new_note = note.Note(pattern)
        new_note.offset = offset
        new_note.storedInstrument = instrument.Piano()
        new_note.duration = duration.Duration(dur)
        output_notes.append(new_note)
    # Advance the offset on every iteration so notes don't pile up on the
    # same beat. NOTE(review): rests therefore advance by 2*TIMESTEP total
    # (branch above plus this line) — confirm that is intended.
    offset += TIMESTEP

midi_stream = stream.Stream(output_notes)
output_file = os.path.basename(self.weights) + ".mid"
print("output to " + output_file)
midi_stream.write("midi", fp=output_file)
更多精彩推荐
☞市值达 58 亿美元,吴恩达的在线教育平台 Coursera 正式上市
☞英特尔第三代 Ice Lake 发布正面与 AMD EPYC PK,结果令人大跌眼镜!
☞AR 第一大单,微软 219 亿美元为美军打造高科技头盔
点分享 点收藏 点点赞 点在看
关注公众号:拾黑(shiheibook)了解更多
[广告]赞助链接:
四季很好,只要有你,文娱排行榜:https://www.yaopaiming.com/
让资讯触达的更精准有趣:https://www.0xu.cn/
关注网络尖刀微信公众号
随时掌握互联网精彩
随时掌握互联网精彩
赞助链接
排名
热点
搜索指数
- 1 为基层减负赋能 促干部实干担当 7941722
- 2 冷冷冷 多地将冻成这样“紫” 7903028
- 3 一想到28号全员洗头就想笑 7812567
- 4 两新扩围落地实施 带动产销两旺 7781066
- 5 央视boys一首歌全是易错字 7627036
- 6 身体这几个表现说明你太累了 7572729
- 7 春运现骨折票:乘火车出重庆2.5元 7409198
- 8 赵今麦 00后的黑历史都是高清的 7308016
- 9 拳王邹市明承认创业失败 7233830
- 10 王菲春晚新歌mv里有李子柒 7109763