knox / Burn-readme-2.rs
use burn::backend::{Autodiff, Wgpu};
use burn::tensor::{Distribution, Tensor};

fn main() {
    // Wrap the WGPU backend in Autodiff so gradients are tracked.
    type Backend = Autodiff<Wgpu>;

    let x: Tensor<Backend, 2> = Tensor::random([32, 32], Distribution::Default);
    let y: Tensor<Backend, 2> = Tensor::random([32, 32], Distribution::Default).require_grad();

    let tmp = x.clone() + y.clone();

    // The gist preview stops above; the assumed, typical continuation runs the
    // backward pass and reads the gradient of y.
    let grads = tmp.backward();
    let y_grad = y.grad(&grads).unwrap();
    println!("{}", y_grad);
}
knox / Burn-readme-1.rs
use burn::tensor::backend::Backend;
use burn::tensor::Tensor;
use core::f64::consts::SQRT_2;

fn gelu_custom<B: Backend, const D: usize>(x: Tensor<B, D>) -> Tensor<B, D> {
    let x = x.clone() * ((x / SQRT_2).erf() + 1);
    x / 2
}
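For reference, this is the exact GELU, x · (1 + erf(x / √2)) / 2, written with Burn's elementwise tensor ops instead of a built-in activation.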
knox / Answer-readme-2.sh
# install wire and mockgen for building
$ make generate
# install frontend dependencies and build
$ make ui
# install backend dependencies and build
$ make build
knox / Answer-readme-1.sh
docker run -d -p 9080:80 -v answer-data:/data --name answer apache/answer:1.4.0
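The flags publish the container's port 80 on host port 9080 and keep site data in the answer-data volume, so once the container is running the instance is reachable at http://localhost:9080.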
knox / Diffusers-7.bib
@misc{von-platen-etal-2022-diffusers,
  author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Dhruv Nair and Sayak Paul and William Berman and Yiyi Xu and Steven Liu and Thomas Wolf},
  title = {Diffusers: State-of-the-art diffusion models},
  year = {2022},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/huggingface/diffusers}}
}
knox / Diffusers-6.md
| Documentation | What can I learn? |
|---|---|
| Tutorial | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
| Loading | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
| Pipelines for inference | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
| Optimization | Guides for how to optimize your diffusion model to run faster and consume less memory. |
| Training | Guides for how to train a diffusion model for different tasks with different training techniques. |
knox / Diffusers-5.py
from diffusers import DDPMScheduler, UNet2DModel
from PIL import Image
import torch

scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
scheduler.set_timesteps(50)

sample_size = model.config.sample_size
noise = torch.randn((1, 3, sample_size, sample_size), device="cuda")
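The preview cuts off before the sampling loop. A minimal sketch of how the example typically continues, following the standard Diffusers pattern of predicting the noise residual and stepping the scheduler (variable and file names here are illustrative):

input = noise
for t in scheduler.timesteps:
    with torch.no_grad():
        noisy_residual = model(input, t).sample                       # UNet predicts the noise at step t
    input = scheduler.step(noisy_residual, t, input).prev_sample      # remove a little noise

# Rescale from [-1, 1] to [0, 255] and convert to a PIL image.
image = (input / 2 + 0.5).clamp(0, 1)
image = (image.cpu().permute(0, 2, 3, 1).numpy()[0] * 255).round().astype("uint8")
Image.fromarray(image).save("ddpm_cat.png")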
knox / Diffusers-4.py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipeline.to("cuda")
pipeline("An image of a squirrel in Picasso style").images[0]
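As written, the last line only makes sense in a notebook, where the returned PIL image is displayed inline; in a script you would capture and save the result, roughly like this (the filename is illustrative):

image = pipeline("An image of a squirrel in Picasso style").images[0]
image.save("squirrel_picasso.png")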
knox / Diffusers-2.sh
conda install -c conda-forge diffusers
knox / Diffusers-1.sh
pip install --upgrade diffusers[torch]
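The [torch] extra pulls in the PyTorch dependencies alongside the core diffusers package.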