Skip to content

Commit

Permalink
feat(metadata): add file name, chunk size/overlap, and date to document metadata
Browse files Browse the repository at this point in the history
  • Loading branch information
StanGirard committed May 12, 2023
1 parent bc7e84b commit badb27b
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 4 deletions.
8 changes: 5 additions & 3 deletions loaders/audio.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import os
import tempfile
from io import BytesIO

import time
import openai
import streamlit as st
from langchain.document_loaders import TextLoader
Expand All @@ -16,6 +16,7 @@
def _transcribe_audio(api_key, audio_file):
openai.api_key = api_key
transcript = ""

with BytesIO(audio_file.read()) as audio_bytes:
# Get the extension of the uploaded file
file_extension = os.path.splitext(audio_file.name)[-1]
Expand All @@ -32,7 +33,8 @@ def _transcribe_audio(api_key, audio_file):

def process_audio(openai_api_key, vector_store, file_name):
file_sha = ""

dateshort = time.strftime("%Y%m%d-%H%M%S")
file_name = f"audiotranscript_{dateshort}.audio"
transcript = _transcribe_audio(openai_api_key, file_name)
file_sha = compute_sha1_from_content(transcript.text.encode("utf-8"))

Expand All @@ -44,7 +46,7 @@ def process_audio(openai_api_key, vector_store, file_name):
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
texts = text_splitter.split_text(transcript.text)

docs_with_metadata = [Document(page_content=text, metadata={"file_sha1": file_sha}) for text in texts]
docs_with_metadata = [Document(page_content=text, metadata={"file_sha1": file_sha,"file_name": file_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for text in texts]


vector_store.add_documents(docs_with_metadata)
Expand Down
5 changes: 4 additions & 1 deletion loaders/common.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import tempfile
import time
from utils import compute_sha1_from_file
from langchain.schema import Document
import streamlit as st
Expand All @@ -7,6 +8,8 @@
def process_file(vector_store, file, loader_class, file_suffix):
documents = []
file_sha = ""
file_name = file.name
dateshort = time.strftime("%Y%m%d")
with tempfile.NamedTemporaryFile(delete=True, suffix=file_suffix) as tmp_file:
tmp_file.write(file.getvalue())
tmp_file.flush()
Expand All @@ -23,7 +26,7 @@ def process_file(vector_store, file, loader_class, file_suffix):
documents = text_splitter.split_documents(documents)

# Add the document sha1 as metadata to each document
docs_with_metadata = [Document(page_content=doc.page_content, metadata={"file_sha1": file_sha1}) for doc in documents]
docs_with_metadata = [Document(page_content=doc.page_content, metadata={"file_sha1": file_sha1, "file_name": file_name, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap, "date": dateshort}) for doc in documents]

vector_store.add_documents(docs_with_metadata)
return

0 comments on commit badb27b

Please sign in to comment.