Merge pull request #226 from MagnivOrg/dmugtasimov/dmi-52-support-changes-in-python-sdk

Linters, formatting and testing improvements
dmugtasimov authored Feb 1, 2025
2 parents 46f957e + 0eb5177 commit 1430afb
Showing 20 changed files with 168 additions and 293 deletions.
5 changes: 2 additions & 3 deletions .devcontainer/devcontainer.json
@@ -21,11 +21,10 @@
"editor.formatOnSave": true,
"python.analysis.autoImportCompletions": true,
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
"editor.defaultFormatter": "charliermarsh.ruff"
},
// TODO(dmu) HIGH: Make linter configuration consistent with .pre-commit-config.yaml
"ruff.args": [
"--exclude",
"migrations",
"--ignore",
"E501,E711,E712",
"--fix",
6 changes: 6 additions & 0 deletions .editorconfig
@@ -0,0 +1,6 @@
# See https://editorconfig.org/ for more info about this file
[*.py]
max_line_length = 120
charset = utf-8
indent_style = space
indent_size = 4
3 changes: 1 addition & 2 deletions .github/workflows/integration-tests.yml
@@ -26,5 +26,4 @@ jobs:
python -m pip install behave langchain openai
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Run Integration Tests
run: |
behave
run: behave
6 changes: 4 additions & 2 deletions .github/workflows/release.yml
@@ -16,12 +16,14 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: "3.8"

- name: Install poetry
run: pipx install poetry

- name: Running poetry install
run: poetry install

- name: Build and publish
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_PASSWORD }}
run: |
poetry publish --build
run: poetry publish --build
27 changes: 11 additions & 16 deletions .pre-commit-config.yaml
@@ -1,20 +1,15 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: end-of-file-fixer
exclude: (app/utils/evaluate/(javascript_code_wrapper|python_code_wrapper)\.txt|langchain_prompts\.txt)
- id: trailing-whitespace
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.290
rev: v0.9.4
hooks:
- id: ruff
args:
[--exclude, features, --ignore, "E501", --fix, --exit-non-zero-on-fix]

- repo: https://github.com/psf/black-pre-commit-mirror
rev: 23.9.1
hooks:
- id: black
args: [--force-exclude, features]

- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
name: isort (python)
args: [--profile, black, --skip, features, --filter-files]
name: 'ruff: fix imports'
args: ["--select", "I", "--fix"]
- id: ruff
- id: ruff-format
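
The consolidated hooks above cover what black and isort did before: the first ruff hook runs with `--select I --fix` to sort imports, and `ruff-format` replaces black for layout. Below is a minimal illustrative sketch (not taken from the repository) of the two effects on a small module; the 120-character limit comes from the new .editorconfig, and ruff is assumed to be configured with the same line length elsewhere in the repo.

```python
# Illustrative sketch only; this snippet is not part of the repository.
import json
import os
from typing import Any, Dict  # import block ordered as "ruff --select I --fix" would leave it


def initialize_tracer(api_key: str, enable_tracing: bool) -> Dict[str, Any]:
    """Stand-in helper so the example runs on its own."""
    return {"api_key": api_key, "tracing": enable_tracing}


# With the wider line limit, calls that black wrapped at 88 columns now fit on one
# line, which is exactly the kind of change visible in promptlayer.py further down:
# tracer = initialize_tracer(
#     api_key, enable_tracing
# )
tracer = initialize_tracer(api_key=os.getenv("PROMPTLAYER_API_KEY", ""), enable_tracing=True)
print(json.dumps(tracer))
```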
8 changes: 7 additions & 1 deletion Makefile
@@ -1,7 +1,13 @@
RUN_TEST := test -f .env && set -a; . ./.env; set +a; poetry run pytest

.PHONY: lint
lint:
poetry run pre-commit run --all-files

.PHONY: test
test:
poetry run pytest
${RUN_TEST}

.PHONY: test-sw
test-sw:
${RUN_TEST} -vv --sw --show-capture=no
14 changes: 7 additions & 7 deletions README.md
@@ -8,11 +8,11 @@
<a href="https://docs.promptlayer.com"><img alt="Docs" src="https://custom-icon-badges.herokuapp.com/badge/docs-PL-green.svg?logo=cake&style=for-the-badge"></a>
<a href="https://www.loom.com/share/196c42e43acd4a369d75e9a7374a0850"><img alt="Demo with Loom" src="https://img.shields.io/badge/Demo-loom-552586.svg?logo=loom&style=for-the-badge&labelColor=gray"></a>

---
---

<div align="left">

[PromptLayer](https://promptlayer.com/) is the first platform that allows you to track, manage, and share your GPT prompt engineering. PromptLayer acts a middleware between your code and OpenAI’s python library.
[PromptLayer](https://promptlayer.com/) is the first platform that allows you to track, manage, and share your GPT prompt engineering. PromptLayer acts a middleware between your code and OpenAI’s python library.

PromptLayer records all your OpenAI API requests, allowing you to search and explore request history in the PromptLayer dashboard.

@@ -53,14 +53,14 @@ openai = promptlayer.openai

### Adding PromptLayer tags: `pl_tags`

PromptLayer allows you to add tags through the `pl_tags` argument. This allows you to track and group requests in the dashboard.
PromptLayer allows you to add tags through the `pl_tags` argument. This allows you to track and group requests in the dashboard.

*Tags are not required but we recommend them!*

```python
openai.Completion.create(
engine="text-ada-001",
prompt="My name is",
engine="text-ada-001",
prompt="My name is",
pl_tags=["name-guessing", "pipeline-2"]
)
```
@@ -69,11 +69,11 @@ After making your first few requests, you should be able to see them in the Prom

## Using the REST API

This Python library is a wrapper over PromptLayer's REST API. If you use another language, like Javascript, just interact directly with the API.
This Python library is a wrapper over PromptLayer's REST API. If you use another language, like Javascript, just interact directly with the API.

Here is an example request below:

```jsx
```python
import requests
request_response = requests.post(
"https://api.promptlayer.com/track-request",
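
The README's REST example is cut off by the diff view above. As a point of reference, here is a hedged sketch of a complete `track-request` call; the request fields shown (`function_name`, `kwargs`, `tags`, `request_response`, `request_start_time`, `request_end_time`, `api_key`) follow PromptLayer's public REST documentation rather than this diff, so check the current docs before relying on them.

```python
# Hedged sketch, not taken from this diff: verify the field names against the
# PromptLayer REST documentation before use.
import time

import requests

request_start_time = time.time()
# ... call your LLM provider here and capture its raw response ...
request_end_time = time.time()

request_response = requests.post(
    "https://api.promptlayer.com/track-request",
    json={
        "function_name": "openai.Completion.create",
        "kwargs": {"engine": "text-ada-001", "prompt": "My name is"},
        "tags": ["name-guessing", "pipeline-2"],
        "request_response": {"choices": [{"text": " John."}]},  # raw provider response
        "request_start_time": request_start_time,
        "request_end_time": request_end_time,
        "api_key": "pl_<YOUR PROMPTLAYER API KEY>",
    },
)
print(request_response.json())
```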
2 changes: 1 addition & 1 deletion promptlayer/__init__.py
@@ -1,4 +1,4 @@
from .promptlayer import AsyncPromptLayer, PromptLayer

__version__ = "1.0.37"
__version__ = "1.0.38"
__all__ = ["PromptLayer", "AsyncPromptLayer", "__version__"]
38 changes: 9 additions & 29 deletions promptlayer/promptlayer.py
@@ -61,9 +61,7 @@ def __init__(
self.api_key = api_key
self.templates = TemplateManager(api_key)
self.group = GroupManager(api_key)
self.tracer_provider, self.tracer = self._initialize_tracer(
api_key, enable_tracing
)
self.tracer_provider, self.tracer = self._initialize_tracer(api_key, enable_tracing)
self.track = TrackManager(api_key)

def __getattr__(
@@ -233,9 +231,7 @@ def run(
span.set_attribute("prompt_name", prompt_name)
span.set_attribute("function_input", str(_run_internal_kwargs))
pl_run_span_id = hex(span.context.span_id)[2:].zfill(16)
result = self._run_internal(
**_run_internal_kwargs, pl_run_span_id=pl_run_span_id
)
result = self._run_internal(**_run_internal_kwargs, pl_run_span_id=pl_run_span_id)
span.set_attribute("function_output", str(result))
return result
else:
@@ -285,18 +281,12 @@ def run_workflow(

if not return_all_outputs:
if is_workflow_results_dict(results):
output_nodes = [
node_data
for node_data in results.values()
if node_data.get("is_output_node")
]
output_nodes = [node_data for node_data in results.values() if node_data.get("is_output_node")]

if not output_nodes:
raise Exception(json.dumps(results, indent=4))

if not any(
node.get("status") == "SUCCESS" for node in output_nodes
):
if not any(node.get("status") == "SUCCESS" for node in output_nodes):
raise Exception(json.dumps(results, indent=4))

return results
@@ -364,14 +354,10 @@ def __init__(
self.api_key = api_key
self.templates = AsyncTemplateManager(api_key)
self.group = AsyncGroupManager(api_key)
self.tracer_provider, self.tracer = self._initialize_tracer(
api_key, enable_tracing
)
self.tracer_provider, self.tracer = self._initialize_tracer(api_key, enable_tracing)
self.track = AsyncTrackManager(api_key)

def __getattr__(
self, name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]]
):
def __getattr__(self, name: Union[Literal["openai"], Literal["anthropic"], Literal["prompts"]]):
if name == "openai":
import openai as openai_module

@@ -400,9 +386,7 @@ async def run_workflow(
input_variables: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, str]] = None,
workflow_label_name: Optional[str] = None,
workflow_version: Optional[
int
] = None, # This is the version number, not the version ID
workflow_version: Optional[int] = None, # This is the version number, not the version ID
return_all_outputs: Optional[bool] = False,
) -> Dict[str, Any]:
try:
@@ -448,9 +432,7 @@ async def run(
span.set_attribute("prompt_name", prompt_name)
span.set_attribute("function_input", str(_run_internal_kwargs))
pl_run_span_id = hex(span.context.span_id)[2:].zfill(16)
result = await self._run_internal(
**_run_internal_kwargs, pl_run_span_id=pl_run_span_id
)
result = await self._run_internal(**_run_internal_kwargs, pl_run_span_id=pl_run_span_id)
span.set_attribute("function_output", str(result))
return result
else:
@@ -563,9 +545,7 @@ async def _run_internal(
input_variables=input_variables,
metadata=metadata,
)
prompt_blueprint = await self.templates.get(
prompt_name, get_prompt_template_params
)
prompt_blueprint = await self.templates.get(prompt_name, get_prompt_template_params)
prompt_blueprint_model = self._validate_and_extract_model_from_prompt_blueprint(
prompt_blueprint=prompt_blueprint, prompt_name=prompt_name
)
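
The promptlayer.py changes above are line-length reformatting only; behaviour is unchanged. For orientation, here is a hedged usage sketch of the entry points touched in this file. Parameter names follow those visible in the diff, except that `workflow_name` and the exact position of `input_variables` in `run()` are assumptions, and all literal values are placeholders.

```python
# Hedged usage sketch; values are placeholders and workflow_name is an assumption.
from promptlayer import PromptLayer

pl = PromptLayer(api_key="pl_<YOUR API KEY>", enable_tracing=True)

# run() opens a span, records prompt_name and the inputs, then forwards to
# _run_internal() together with the span id (pl_run_span_id).
result = pl.run(prompt_name="my-prompt", input_variables={"name": "Ada"})

# run_workflow() raises if no output node finished with status "SUCCESS",
# unless return_all_outputs=True is passed.
outputs = pl.run_workflow(
    workflow_name="my-workflow",  # assumed parameter name
    input_variables={"name": "Ada"},
    workflow_version=1,  # version number, not the version ID
    return_all_outputs=False,
)
```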
43 changes: 12 additions & 31 deletions promptlayer/promptlayer_base.py
@@ -15,9 +15,7 @@ class PromptLayerBase(object):
"_tracer",
]

def __init__(
self, obj, function_name="", provider_type="openai", api_key=None, tracer=None
):
def __init__(self, obj, function_name="", provider_type="openai", api_key=None, tracer=None):
object.__setattr__(self, "_obj", obj)
object.__setattr__(self, "_function_name", function_name)
object.__setattr__(self, "_provider_type", provider_type)
@@ -29,29 +27,22 @@ def __getattr__(self, name):

if (
name != "count_tokens" # fix for anthropic count_tokens
and not re.match(
r"<class 'anthropic\..*Error'>", str(attr)
) # fix for anthropic errors
and not re.match(
r"<class 'openai\..*Error'>", str(attr)
) # fix for openai errors
and not re.match(r"<class 'anthropic\..*Error'>", str(attr)) # fix for anthropic errors
and not re.match(r"<class 'openai\..*Error'>", str(attr)) # fix for openai errors
and (
inspect.isclass(attr)
or inspect.isfunction(attr)
or inspect.ismethod(attr)
or str(type(attr))
== "<class 'anthropic.resources.completions.Completions'>"
or str(type(attr))
== "<class 'anthropic.resources.completions.AsyncCompletions'>"
or str(type(attr)) == "<class 'anthropic.resources.completions.Completions'>"
or str(type(attr)) == "<class 'anthropic.resources.completions.AsyncCompletions'>"
or str(type(attr)) == "<class 'anthropic.resources.messages.Messages'>"
or str(type(attr))
== "<class 'anthropic.resources.messages.AsyncMessages'>"
or str(type(attr)) == "<class 'anthropic.resources.messages.AsyncMessages'>"
or re.match(r"<class 'openai\.resources.*'>", str(type(attr)))
)
):
return PromptLayerBase(
attr,
function_name=f'{object.__getattribute__(self, "_function_name")}.{name}',
function_name=f"{object.__getattribute__(self, '_function_name')}.{name}",
provider_type=object.__getattribute__(self, "_provider_type"),
api_key=object.__getattribute__(self, "_api_key"),
tracer=object.__getattribute__(self, "_tracer"),
Expand All @@ -77,16 +68,10 @@ def __call__(self, *args, **kwargs):

if tracer:
with tracer.start_as_current_span(function_name) as llm_request_span:
llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(
16
)
llm_request_span.set_attribute(
"provider", object.__getattribute__(self, "_provider_type")
)
llm_request_span_id = hex(llm_request_span.context.span_id)[2:].zfill(16)
llm_request_span.set_attribute("provider", object.__getattribute__(self, "_provider_type"))
llm_request_span.set_attribute("function_name", function_name)
llm_request_span.set_attribute(
"function_input", str({"args": args, "kwargs": kwargs})
)
llm_request_span.set_attribute("function_input", str({"args": args, "kwargs": kwargs}))

if inspect.isclass(function_object):
result = PromptLayerBase(
@@ -101,9 +86,7 @@ def __call__(self, *args, **kwargs):

function_response = function_object(*args, **kwargs)

if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
function_response
):
if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
return async_wrapper(
function_response,
return_pl_id,
@@ -146,9 +129,7 @@ def __call__(self, *args, **kwargs):

function_response = function_object(*args, **kwargs)

if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(
function_response
):
if inspect.iscoroutinefunction(function_object) or inspect.iscoroutine(function_response):
return async_wrapper(
function_response,
return_pl_id,
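
promptlayer_base.py implements a proxy: `__getattr__` re-wraps interesting attributes of the underlying provider module so the dotted function name accumulates, and `__call__` opens a span and records the provider, function name, and inputs around the real call. Below is a stripped-down sketch of the same pattern (illustrative only; it drops the provider-specific type checks, the coroutine handling, and the OpenTelemetry wiring shown above).

```python
# Illustrative sketch of the proxy pattern, not the actual PromptLayerBase code.
import inspect
import json


class TracingProxy:
    __slots__ = ["_obj", "_function_name"]

    def __init__(self, obj, function_name=""):
        object.__setattr__(self, "_obj", obj)
        object.__setattr__(self, "_function_name", function_name)

    def __getattr__(self, name):
        attr = getattr(object.__getattribute__(self, "_obj"), name)
        prefix = object.__getattribute__(self, "_function_name")
        # Keep wrapping callables so the dotted name (e.g. "json.dumps") builds up.
        if inspect.isclass(attr) or inspect.isfunction(attr) or inspect.ismethod(attr):
            return TracingProxy(attr, function_name=f"{prefix}.{name}")
        return attr

    def __call__(self, *args, **kwargs):
        function_name = object.__getattribute__(self, "_function_name")
        print(f"tracing {function_name}")  # PromptLayerBase opens an OpenTelemetry span here
        return object.__getattribute__(self, "_obj")(*args, **kwargs)


# Usage sketch: attribute access on the wrapped module stays traced.
wrapped_json = TracingProxy(json, function_name="json")
print(wrapped_json.dumps({"hello": "world"}))
```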