Jina-1.py
· 789 B · Python
Raw
from jina import Executor, requests
from docarray import DocList, BaseDoc
from transformers import pipeline
class Prompt(BaseDoc):
    """Input document schema: a single text prompt for the LLM."""

    # Raw prompt string passed verbatim to the text-generation pipeline.
    text: str
class Generation(BaseDoc):
    """Output document schema: a prompt paired with its generated text."""

    # The original prompt this generation was produced from.
    prompt: str
    # The model's generated continuation for the prompt.
    text: str
class StableLM(Executor):
    """Jina Executor wrapping the HF ``stabilityai/stablelm-base-alpha-3b``
    text-generation pipeline.

    Receives a :class:`DocList` of :class:`Prompt` documents and returns a
    :class:`DocList` of :class:`Generation` documents, one per prompt.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Load the model once at Executor startup; the first run may
        # download weights, so this can take a while.
        self.generator = pipeline(
            'text-generation', model='stabilityai/stablelm-base-alpha-3b'
        )

    @requests
    def generate(self, docs: DocList[Prompt], **kwargs) -> DocList[Generation]:
        """Generate one completion per incoming prompt.

        :param docs: prompts to complete (``docs.text`` yields the strings).
        :return: a ``DocList[Generation]`` pairing each prompt with its
            generated text, in input order.
        """
        generations = DocList[Generation]()
        prompts = docs.text
        # Guard the empty case so we never invoke the pipeline on [].
        if not prompts:
            return generations
        llm_outputs = self.generator(prompts)
        for prompt, output in zip(prompts, llm_outputs):
            # BUG FIX: for a batch of prompts the pipeline yields, per
            # prompt, a list of candidate dicts like
            # [{'generated_text': ...}] — the original code appended that
            # raw list to Generation.text, violating the ``text: str``
            # schema. Extract the first candidate's string instead
            # (handling the single-dict form defensively as well).
            if isinstance(output, dict):
                generated = output['generated_text']
            else:
                generated = output[0]['generated_text']
            generations.append(Generation(prompt=prompt, text=generated))
        return generations
1 | from jina import Executor, requests |
2 | from docarray import DocList, BaseDoc |
3 | from transformers import pipeline |
4 | |
5 | |
6 | class Prompt(BaseDoc): |
7 | text: str |
8 | |
9 | |
10 | class Generation(BaseDoc): |
11 | prompt: str |
12 | text: str |
13 | |
14 | |
15 | class StableLM(Executor): |
16 | def __init__(self, **kwargs): |
17 | super().__init__(**kwargs) |
18 | self.generator = pipeline( |
19 | 'text-generation', model='stabilityai/stablelm-base-alpha-3b' |
20 | ) |
21 | |
22 | @requests |
23 | def generate(self, docs: DocList[Prompt], **kwargs) -> DocList[Generation]: |
24 | generations = DocList[Generation]() |
25 | prompts = docs.text |
26 | llm_outputs = self.generator(prompts) |
27 | for prompt, output in zip(prompts, llm_outputs): |
28 | generations.append(Generation(prompt=prompt, text=output)) |
29 | return generations |