<!-- Saved-page scraper artifact header removed; the document proper begins at the doctype below. -->
<!DOCTYPE html><html lang="en" class=""><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width, initial-scale=1"/><link rel="icon" type="image/png" id="default-favicon" href="/assets/favicon-light-mode.png" media="(prefers-color-scheme: light)"/><link rel="icon" type="image/png" href="/assets/favicon-dark-mode.png" media="(prefers-color-scheme: dark)"/><title>Fine-tune Llama 2 on Replicate - Replicate blog</title><meta property="og:title" content="Fine-tune Llama 2 on Replicate"/><meta name="description" content="So you want to train a llama..."/><meta property="og:description" content="So you want to train a llama..."/><meta property="og:image" content="https://replicate.com/blog/fine-tune-llama-2.png"/><meta property="og:type" content="website"/><meta name="twitter:card" content="summary_large_image"/><meta name="twitter:site" content="@replicate"/><meta name="twitter:title" content="Fine-tune Llama 2 on Replicate"/><meta name="twitter:description" content="So you want to train a llama..."/><meta name="twitter:image" content="https://replicate.com/blog/fine-tune-llama-2.png"/><meta name="color-scheme" content="dark light"/><script> (() => { const theme = window.matchMedia("(prefers-color-scheme: light)").matches ? 
'light' : 'dark'; const cl = document.documentElement.classList; const dataAttr = document.documentElement.dataset.theme; if (dataAttr != null) { const themeAlreadyApplied = dataAttr === 'light' || dataAttr === 'dark'; if (!themeAlreadyApplied) { document.documentElement.dataset.theme = theme; } } else { const themeAlreadyApplied = cl.contains('light') || cl.contains('dark'); if (!themeAlreadyApplied) { cl.add(theme); } } const meta = document.querySelector('meta[name=color-scheme]'); if (meta) { if (theme === 'dark') { meta.content = 'dark light'; } else if (theme === 'light') { meta.content = 'light dark'; } } })(); </script><link rel="stylesheet" href="/frontend-assets/index-Ds7p1bbX.css"/><link rel="stylesheet" href="/frontend-assets/index-CGt2XKj9.css"/><link rel="stylesheet" href="/frontend-assets/nprogress-CSXic_Zd.css"/></head><body><header class="bg-white/80 dark:bg-r8-gray-a1 top-0 left-0 right-0 z-10 relative backdrop-blur-md md:fixed"><div class="h-[var(--header-height)] border-b border-r8-gray-6 dark:border-white/10 flex items-center"><div data-header-container="true" class="max-w-screen-2xl px-6 lg:px-16 w-full mx-auto flex items-center justify-between"><div class="flex items-center gap-2"><a href="https://replicate.com" class="text-black dark:text-white size-8 inline-flex items-center justify-center focus:bg-black focus:text-white dark:focus:bg-white text-r8-sm focus:outline-black dark:focus:outline-white dark:focus:text-black outline-offset-0 focus:outline outline-8"><div class="size-8"><svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" viewBox="0 0 1000 1000" fill="currentColor"><title>Replicate logo</title><g><polygon points="1000,427.6 1000,540.6 603.4,540.6 603.4,1000 477,1000 477,427.6"></polygon><polygon points="1000,213.8 1000,327 364.8,327 364.8,1000 238.4,1000 238.4,213.8"></polygon><polygon points="1000,0 1000,113.2 126.4,113.2 126.4,1000 0,1000 
0,0"></polygon></g></svg></div></a></div><nav class="hidden md:flex items-center gap-4"><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="https://replicate.com/explore">Explore</a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="https://replicate.com/playground">Playground<span class="text-r8-xs ml-1 border text-r8-green-a10 border-r8-green-a10 rounded-full px-2 py-0.5 group-focus:text-r8-gray-1 group-focus:border-r8-gray-1 group-focus:bg-r8-gray-12 group-hover:bg-r8-green-10 group-hover:border-r8-green-1 group-hover:text-r8-green-1 transition-all">Beta</span></a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="https://replicate.com/pricing">Pricing</a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="/docs" data-discover="true">Docs</a><a aria-current="page" class="link-focus group text-r8-gray-12 underline underline-offset-4" href="/blog" data-discover="true">Blog</a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="/changelog" data-discover="true">Changelog</a><a class="link-focus group link-primary !px-0.5" href="https://replicate.com/signin">Sign in</a></nav><div class="md:hidden "><button aria-expanded="false" class="r8-btn r8-btn--outlined r8-btn--primary r8-btn--sm"><span class="r8-btn__icon"><svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" fill="currentColor" viewBox="0 0 256 256"><path d="M228,128a12,12,0,0,1-12,12H40a12,12,0,0,1,0-24H216A12,12,0,0,1,228,128ZM40,76H216a12,12,0,0,0,0-24H40a12,12,0,0,0,0,24ZM216,180H40a12,12,0,0,0,0,24H216a12,12,0,0,0,0-24Z"></path></svg></span>Menu</button></div></div></div></header><div id=":R4j5:" hidden="" class="md:hidden" style="display:none"><div class="bg-white dark:bg-r8-gray-1 border-b border-r8-gray-6"><div class="max-w-screen-2xl px-6 pb-4 mx-auto"><nav class="flex flex-col gap-3 py-4"><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" 
href="https://replicate.com/explore">Explore</a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="https://replicate.com/playground">Playground<span class="text-r8-xs ml-1 border text-r8-green-a10 border-r8-green-a10 rounded-full px-2 py-0.5 group-focus:text-r8-gray-1 group-focus:border-r8-gray-1 group-focus:bg-r8-gray-12 group-hover:bg-r8-green-10 group-hover:border-r8-green-1 group-hover:text-r8-green-1 transition-all">Beta</span></a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="https://replicate.com/pricing">Pricing</a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="/docs" data-discover="true">Docs</a><a aria-current="page" class="link-focus group text-r8-gray-12 underline underline-offset-4" href="/blog" data-discover="true">Blog</a><a class="link-focus group text-r8-gray-11 hover:text-r8-gray-12" href="/changelog" data-discover="true">Changelog</a><a class="link-focus group link-primary !px-0.5" href="https://replicate.com/signin">Sign in</a></nav></div></div></div><div class="container px-6 md:pt-[var(--header-height)] min-h-[calc(100dvh-var(--header-height))]"><div class="max-w-screen-xl py-8"><div><div><h1 class="text-r8-4xl tracking-tight text-balance leading-tight font-heading font-semibold">Fine-tune Llama 2 on Replicate</h1><div class="mt-2 text-r8-gray-10">Posted <span>July 20, 2023</span> by<!-- --> <ul class="inline-flex items-center gap-2"><li><a href="https://replicate.com/cbh123" class="underline">cbh123</a></li></ul></div></div><div class="docs-prose"><p>Llama 2 is the first open-source language model of the same caliber as OpenAI’s models, and because it’s open source you can hack it to do new things that aren’t possible with GPT-4.</p> <p>Like become a <a href="https://replicate.com/blog/turn-your-llm-into-a-poet">better poet</a>. Talk like <a href="https://replicate.com/blog/fine-tune-llama-to-speak-like-homer-simpson">Homer Simpson</a>. 
Write <a href="https://twitter.com/fofrAI/status/1651259806949187590?s=20">Midjourney prompts.</a> Or replace your <a href="https://www.izzy.co/blogs/robo-boys.html">best friends</a>.</p> <figure class="mb-2lh"><img class="h-96" src="/assets/blog/fine-tune-llama-2/llama-stampede.png" alt="A stampede of futuristic llamas"/><figcaption class="text-r8-gray-11 underline-links text-sm">A stampede of futuristic llamas by <a href="https://replicate.com/p/3rrqugrbuwlddcmtokphb6uelm">ai-forever/kandinsky-2.2</a></figcaption></figure> <p>One of the main reasons to fine-tune models is so you can use a small model to do a task that would normally require a large model. This means you can do the same task, but cheaper and faster. For example, the 7 billion parameter Llama 2 model is not good at summarizing text, but we can teach it how.</p> <p>In this guide, we’ll show you how to create a text summarizer. We&#x27;ll be using <a href="/meta/llama-2-7b">Llama 2 7B</a>, an open-source large language model from Meta and fine-tuning it on a dataset of messenger-like conversations with summaries. When we&#x27;re done, you&#x27;ll be able to distill chat transcripts, emails, webpages, and other documents into a brief summary. 
Short and sweet.</p> <h2 id="supported-models"><a class="mdx-heading-anchor" aria-label="Anchor for supported-models" href="#supported-models"><span class="icon icon-link"></span></a>Supported models</h2> <p>Here are the Llama models on Replicate that you can fine-tune:</p> <ul> <li><a href="https://replicate.com/meta/llama-2-7b">Llama 2 7B Base</a></li> <li><a href="https://replicate.com/meta/llama-2-13b">Llama 2 13B Base</a></li> <li><a href="https://replicate.com/meta/llama-2-70b">Llama 2 70B Base</a></li> <li><a href="https://replicate.com/meta/llama-2-7b-chat">Llama 2 7B Chat</a></li> <li><a href="https://replicate.com/meta/llama-2-13b-chat">Llama 2 13B Chat</a></li> <li><a href="https://replicate.com/meta/llama-2-70b-chat">Llama 2 70B Chat</a></li> </ul> <p>If your model is responding to instructions from users, you want to use the chat models. If you are just completing text, you&#x27;ll want to use the base.</p> <h2 id="training-data"><a class="mdx-heading-anchor" aria-label="Anchor for training-data" href="#training-data"><span class="icon icon-link"></span></a>Training data</h2> <p>Your training data should be in a JSONL text file.</p> <p>In this guide, we’ll be using <a href="https://huggingface.co/datasets/samsum">the SAMSum dataset</a>, transformed into JSONL.</p> <h2 id="create-a-model"><a class="mdx-heading-anchor" aria-label="Anchor for create-a-model" href="#create-a-model"><span class="icon icon-link"></span></a>Create a model</h2> <p>You need to create an empty model on Replicate for your trained model. 
When your training finishes, it will be pushed as a new <a href="https://replicate.com/docs/how-does-replicate-work#terminology">version</a> to this model.</p> <p>Go to <a href="https://replicate.com/create?name=llama2-summarizer&amp;visibility=private&amp;purpose=fine-tune-language">replicate.com/create</a> and create a new model called “llama2-summarizer”.</p> <h2 id="authenticate"><a class="mdx-heading-anchor" aria-label="Anchor for authenticate" href="#authenticate"><span class="icon icon-link"></span></a>Authenticate</h2> <p>Authenticate by setting your token in an environment variable:</p> <figure data-rehype-pretty-code-figure=""><pre style="--shiki-dark:#e6edf3;--shiki-light:#24292e;--shiki-dark-bg:#0d1117;--shiki-light-bg:#fff" tabindex="0" data-language="bash" data-theme="github-dark-default github-light"><code data-language="bash" data-theme="github-dark-default github-light" style="display:grid"><span data-line=""><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">export</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> REPLICATE_API_TOKEN</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=&lt;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">paste-your-token-here</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">&gt;</span></span></code></pre></figure> <p>Find your API token in your <a href="/account/api-tokens">account settings</a>.</p> <h2 id="create-a-training"><a class="mdx-heading-anchor" aria-label="Anchor for create-a-training" href="#create-a-training"><span class="icon icon-link"></span></a>Create a training</h2> <p>Install the Python library:</p> <figure data-rehype-pretty-code-figure=""><pre style="--shiki-dark:#e6edf3;--shiki-light:#24292e;--shiki-dark-bg:#0d1117;--shiki-light-bg:#fff" tabindex="0" data-language="bash" data-theme="github-dark-default github-light"><code data-language="bash" data-theme="github-dark-default github-light" style="display:grid"><span data-line=""><span 
style="--shiki-dark:#FFA657;--shiki-light:#6F42C1">pip</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62"> install</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62"> replicate</span></span></code></pre></figure> <p>And kick off training, replacing the destination name with your username and the name of your new model:</p> <figure data-rehype-pretty-code-figure=""><pre style="--shiki-dark:#e6edf3;--shiki-light:#24292e;--shiki-dark-bg:#0d1117;--shiki-light-bg:#fff" tabindex="0" data-language="python" data-theme="github-dark-default github-light"><code data-language="python" data-theme="github-dark-default github-light" style="display:grid"><span data-line=""><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">import</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> replicate</span></span> <span data-line=""> </span> <span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">training </span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> replicate.trainings.create(</span></span> <span data-line=""><span style="--shiki-dark:#FFA657;--shiki-light:#E36209"> version</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;meta/llama-2-7b:73001d654114dad81ec65da3b834e2f691af1e1526453189b7bf36fb3f32d0f9&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">,</span></span> <span data-line=""><span style="--shiki-dark:#FFA657;--shiki-light:#E36209"> input</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">{</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62"> &quot;train_data&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">: </span><span 
style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;https://gist.githubusercontent.com/nateraw/055c55b000e4c37d43ce8eb142ccc0a2/raw/d13853512fc83e8c656a3e8b6e1270dd3c398e77/samsum.jsonl&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">,</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62"> &quot;num_train_epochs&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">: </span><span style="--shiki-dark:#79C0FF;--shiki-light:#005CC5">3</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">,</span></span> <span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> },</span></span> <span data-line=""><span style="--shiki-dark:#FFA657;--shiki-light:#E36209"> destination</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;&lt;your-username&gt;/llama2-summarizer&quot;</span></span> <span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">)</span></span> <span data-line=""> </span> <span data-line=""><span style="--shiki-dark:#79C0FF;--shiki-light:#005CC5">print</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">(training)</span></span></code></pre></figure> <p>It takes these arguments:</p> <ul> <li><code>version</code>: The model to train, in the format <code>{username}/{model}:{version}</code>.</li> <li><code>input</code>: The training data and params to pass to the training process, which are defined by the model. 
Llama 2&#x27;s params can be found in <a href="https://replicate.com/meta/llama-2-13b/train#training-inputs">the model&#x27;s &quot;Train&quot; tab</a>.</li> <li><code>destination</code>: The model to push the trained version to, in the format <code>your-username/your-model-name</code></li> </ul> <p>Once you&#x27;ve kicked off your training, visit <a href="https://replicate.com/trainings">replicate.com/trainings</a> in your browser to monitor the progress.</p> <h2 id="run-the-model"><a class="mdx-heading-anchor" aria-label="Anchor for run-the-model" href="#run-the-model"><span class="icon icon-link"></span></a>Run the model</h2> <p>You can now run your model from the web or with an API. To use your model in the browser, go to your model page.</p> <p>To use your model with an API, run the <code>version</code> from the training output:</p> <figure data-rehype-pretty-code-figure=""><pre style="--shiki-dark:#e6edf3;--shiki-light:#24292e;--shiki-dark-bg:#0d1117;--shiki-light-bg:#fff" tabindex="0" data-language="python" data-theme="github-dark-default github-light"><code data-language="python" data-theme="github-dark-default github-light" style="display:grid"><span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">training.reload()</span></span> <span data-line=""> </span> <span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">prompt </span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62"> &quot;&quot;&quot;[INST] &lt;&lt;SYS&gt;&gt;</span><span style="--shiki-dark:#79C0FF;--shiki-light:#005CC5">\</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Use the Input to provide a summary of a conversation.</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&lt;&lt;/SYS&gt;&gt;</span></span> <span data-line=""> </span> <span data-line=""><span 
style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Input:</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Harry: Who are you?</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Hagrid: Rubeus Hagrid, Keeper of Keys and Grounds at Hogwarts. Of course, you know all about Hogwarts.</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Harry: Sorry, no.</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Hagrid: No? Blimey, Harry, did you never wonder where yer parents learned it all?</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Harry: All what?</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Hagrid: Yer a wizard, Harry.</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Harry: I-- I&#x27;m a what?</span></span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Hagrid: A wizard! And a thumpin&#x27; good &#x27;un, I&#x27;ll wager, once you&#x27;ve been trained up a bit. 
[/INST]</span></span> <span data-line=""> </span> <span data-line=""><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">Summary: &quot;&quot;&quot;</span></span> <span data-line=""> </span> <span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">output </span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> replicate.run(</span></span> <span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> training.output[</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;version&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">],</span></span> <span data-line=""><span style="--shiki-dark:#FFA657;--shiki-light:#E36209"> input</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">{</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;prompt&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">: prompt, </span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;stop_sequences&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">: </span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;&lt;/s&gt;&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">}</span></span> <span data-line=""><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">)</span></span> <span data-line=""><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">for</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> s </span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">in</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E"> output:</span></span> <span data-line=""><span style="--shiki-dark:#79C0FF;--shiki-light:#005CC5"> print</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">(s, </span><span 
style="--shiki-dark:#FFA657;--shiki-light:#E36209">end</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#A5D6FF;--shiki-light:#032F62">&quot;&quot;</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">, </span><span style="--shiki-dark:#FFA657;--shiki-light:#E36209">flush</span><span style="--shiki-dark:#FF7B72;--shiki-light:#D73A49">=</span><span style="--shiki-dark:#79C0FF;--shiki-light:#005CC5">True</span><span style="--shiki-dark:#E6EDF3;--shiki-light:#24292E">)</span></span></code></pre></figure> <p>That&#x27;s it! You&#x27;ve fine-tuned Llama 2 and can run your new model with an API.</p> <h2 id="next-steps"><a class="mdx-heading-anchor" aria-label="Anchor for next-steps" href="#next-steps"><span class="icon icon-link"></span></a>Next steps</h2> <ul> <li><a href="https://x.com/replicate">Follow us on <s>Twitter</s> X to get the latest from the Llamaverse.</a></li> <li><a href="https://replicate.com/blog/run-llama-2-with-an-api">Run the base Llama 2 with an API.</a></li> </ul> <p>Happy hacking! 
🦙</p></div></div></div></div><footer class="border-t border-r8-gray-5 py-4 text-r8-sm mx-auto"><div class="container flex items-center justify-between flex-wrap gap-4"><div class="dark:invert dark:hue-rotate-180 rounded [color-scheme:light]"><iframe src="https://replicatestatus.com/badge" id="status-badge" height="30" width="100%" title="Replicate status"></iframe></div><div><button class="r8-btn r8-btn--outlined r8-btn--primary r8-btn--md r8-btn--icon undefined"><span class="r8-btn__icon"><svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" fill="currentColor" viewBox="0 0 256 256"><path d="M232,168h-8V72a24,24,0,0,0-24-24H56A24,24,0,0,0,32,72v96H24a8,8,0,0,0-8,8v16a24,24,0,0,0,24,24H216a24,24,0,0,0,24-24V176A8,8,0,0,0,232,168ZM48,72a8,8,0,0,1,8-8H200a8,8,0,0,1,8,8v96H48ZM224,192a8,8,0,0,1-8,8H40a8,8,0,0,1-8-8v-8H224ZM152,88a8,8,0,0,1-8,8H112a8,8,0,0,1,0-16h32A8,8,0,0,1,152,88Z"></path></svg></span></button><span id=":R2dj5:" style="position:fixed" hidden=""></span><button class="r8-btn r8-btn--clear r8-btn--primary r8-btn--md r8-btn--icon undefined"><span class="r8-btn__icon"><svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" fill="currentColor" viewBox="0 0 256 256"><path d="M120,40V16a8,8,0,0,1,16,0V40a8,8,0,0,1-16,0Zm72,88a64,64,0,1,1-64-64A64.07,64.07,0,0,1,192,128Zm-16,0a48,48,0,1,0-48,48A48.05,48.05,0,0,0,176,128ZM58.34,69.66A8,8,0,0,0,69.66,58.34l-16-16A8,8,0,0,0,42.34,53.66Zm0,116.68-16,16a8,8,0,0,0,11.32,11.32l16-16a8,8,0,0,0-11.32-11.32ZM192,72a8,8,0,0,0,5.66-2.34l16-16a8,8,0,0,0-11.32-11.32l-16,16A8,8,0,0,0,192,72Zm5.66,114.34a8,8,0,0,0-11.32,11.32l16,16a8,8,0,0,0,11.32-11.32ZM48,128a8,8,0,0,0-8-8H16a8,8,0,0,0,0,16H40A8,8,0,0,0,48,128Zm80,80a8,8,0,0,0-8,8v24a8,8,0,0,0,16,0V216A8,8,0,0,0,128,208Zm112-88H216a8,8,0,0,0,0,16h24a8,8,0,0,0,0-16Z"></path></svg></span></button><span id=":R2lj5:" style="position:fixed" hidden=""></span><button class="r8-btn r8-btn--clear r8-btn--primary r8-btn--md r8-btn--icon undefined"><span 
class="r8-btn__icon"><svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" fill="currentColor" viewBox="0 0 256 256"><path d="M233.54,142.23a8,8,0,0,0-8-2,88.08,88.08,0,0,1-109.8-109.8,8,8,0,0,0-10-10,104.84,104.84,0,0,0-52.91,37A104,104,0,0,0,136,224a103.09,103.09,0,0,0,62.52-20.88,104.84,104.84,0,0,0,37-52.91A8,8,0,0,0,233.54,142.23ZM188.9,190.34A88,88,0,0,1,65.66,67.11a89,89,0,0,1,31.4-26A106,106,0,0,0,96,56,104.11,104.11,0,0,0,200,160a106,106,0,0,0,14.92-1.06A89,89,0,0,1,188.9,190.34Z"></path></svg></span></button><span id=":R2tj5:" style="position:fixed" hidden=""></span></div></div></footer><link rel="modulepreload" href="/assets/entry.client-BwK8caee.js"/><link rel="modulepreload" href="/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js"/><link rel="modulepreload" href="/frontend-assets/index-DpchfCeG.js"/><link rel="modulepreload" href="/frontend-assets/index-C39VWG4p.js"/><link rel="modulepreload" href="/assets/redirect-DfbXaSx1.js"/><link rel="modulepreload" href="/frontend-assets/exports-DRM9pL-2.js"/><link rel="modulepreload" href="/assets/root-DzjtAaUO.js"/><link rel="modulepreload" href="/frontend-assets/with-props-Brf4Fy-b.js"/><link rel="modulepreload" href="/frontend-assets/index-C0qJxpfZ.js"/><link rel="modulepreload" href="/frontend-assets/index-DkjZEnzH.js"/><link rel="modulepreload" href="/frontend-assets/index-BhFLEXu2.js"/><link rel="modulepreload" href="/frontend-assets/useRequest-Jxg2S_l5.js"/><link rel="modulepreload" href="/frontend-assets/client-only-3gf2CKHz.js"/><link rel="modulepreload" href="/frontend-assets/glyph-loader-BpijCgqK.js"/><link rel="modulepreload" href="/frontend-assets/index-BKyjhPpS.js"/><link rel="modulepreload" href="/frontend-assets/Warning-DlTJ60fz.js"/><link rel="modulepreload" href="/frontend-assets/bg-goo-BSf1KuIl.js"/><link rel="modulepreload" href="/frontend-assets/replicate-BaRJS5cI.js"/><link rel="modulepreload" href="/frontend-assets/index-RIBYm-Q5.js"/><link rel="modulepreload" 
href="/frontend-assets/index-C4Hi1N1n.js"/><link rel="modulepreload" href="/frontend-assets/index-DCRUtxEn.js"/><link rel="modulepreload" href="/frontend-assets/root-8eZD8nXZ.js"/><link rel="modulepreload" href="/frontend-assets/constants-BU-Voyre.js"/><link rel="modulepreload" href="/frontend-assets/isBrowser-C5DMOrl5.js"/><link rel="modulepreload" href="/frontend-assets/debounce-CbIcyUWR.js"/><link rel="modulepreload" href="/frontend-assets/IconBase-aPY_BkEt.js"/><link rel="modulepreload" href="/frontend-assets/use-reduced-motion-DtGVGoV5.js"/><link rel="modulepreload" href="/frontend-assets/proxy-kx7hH_pw.js"/><link rel="modulepreload" href="/frontend-assets/index-CxO9y2oK.js"/><link rel="modulepreload" href="/frontend-assets/use-in-view-DEgJ9Ptc.js"/><link rel="modulepreload" href="/frontend-assets/_commonjs-dynamic-modules-DYt-0jxj.js"/><link rel="modulepreload" href="/frontend-assets/index-D25pJ9J9.js"/><link rel="modulepreload" href="/frontend-assets/avatar-menu-BukIyJn1.js"/><link rel="modulepreload" href="/frontend-assets/index-VObRRx9T.js"/><link rel="modulepreload" href="/frontend-assets/index-C99v8T86.js"/><link rel="modulepreload" href="/frontend-assets/tooltip-provider-Bpokk5PD.js"/><link rel="modulepreload" href="/frontend-assets/string-mMrxK_zr.js"/><link rel="modulepreload" href="/frontend-assets/4CMBR7SL-BalhXzPm.js"/><link rel="modulepreload" href="/frontend-assets/logo-glyph-J3lYGwuy.js"/><link rel="modulepreload" href="/assets/layout-JZqblKiQ.js"/><link rel="modulepreload" href="/frontend-assets/index-Dz6kh0GH.js"/><link rel="modulepreload" href="/assets/detail-C9MooHAM.js"/><link rel="modulepreload" href="/assets/fine-tune-llama-2-Df6dXqkW.js"/><link rel="modulepreload" href="/frontend-assets/blog-CNe9F6VR.js"/><link rel="modulepreload" href="/frontend-assets/index-ChvNWze6.js"/><link rel="modulepreload" href="/frontend-assets/feed-eMWEd8FT.js"/><script>window.__reactRouterContext = 
{"basename":"/","future":{"unstable_optimizeDeps":false,"unstable_splitRouteModules":false,"unstable_viteEnvironmentApi":false},"ssr":true,"isSpaMode":false};window.__reactRouterContext.stream = new ReadableStream({start(controller){window.__reactRouterContext.streamController = controller;}}).pipeThrough(new TextEncoderStream());</script><script type="module" async="">; import * as route0 from "/assets/root-DzjtAaUO.js"; import * as route1 from "/assets/layout-JZqblKiQ.js"; import * as route2 from "/assets/detail-C9MooHAM.js"; import * as route3 from "/assets/fine-tune-llama-2-Df6dXqkW.js"; window.__reactRouterManifest = { "entry": { "module": "/assets/entry.client-BwK8caee.js", "imports": [ "/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js", "/frontend-assets/index-DpchfCeG.js", "/frontend-assets/index-C39VWG4p.js", "/assets/redirect-DfbXaSx1.js", "/frontend-assets/exports-DRM9pL-2.js" ], "css": [] }, "routes": { "root": { "id": "root", "path": "", "hasAction": false, "hasLoader": true, "hasClientAction": false, "hasClientLoader": false, "hasErrorBoundary": true, "module": "/assets/root-DzjtAaUO.js", "imports": [ "/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js", "/frontend-assets/index-DpchfCeG.js", "/frontend-assets/index-C39VWG4p.js", "/assets/redirect-DfbXaSx1.js", "/frontend-assets/exports-DRM9pL-2.js", "/frontend-assets/with-props-Brf4Fy-b.js", "/frontend-assets/index-C0qJxpfZ.js", "/frontend-assets/index-DkjZEnzH.js", "/frontend-assets/index-BhFLEXu2.js", "/frontend-assets/useRequest-Jxg2S_l5.js", "/frontend-assets/client-only-3gf2CKHz.js", "/frontend-assets/glyph-loader-BpijCgqK.js", "/frontend-assets/index-BKyjhPpS.js", "/frontend-assets/Warning-DlTJ60fz.js", "/frontend-assets/bg-goo-BSf1KuIl.js", "/frontend-assets/replicate-BaRJS5cI.js", "/frontend-assets/index-RIBYm-Q5.js", "/frontend-assets/index-C4Hi1N1n.js", "/frontend-assets/index-DCRUtxEn.js", "/frontend-assets/root-8eZD8nXZ.js", "/frontend-assets/constants-BU-Voyre.js", 
"/frontend-assets/isBrowser-C5DMOrl5.js", "/frontend-assets/debounce-CbIcyUWR.js", "/frontend-assets/IconBase-aPY_BkEt.js", "/frontend-assets/use-reduced-motion-DtGVGoV5.js", "/frontend-assets/proxy-kx7hH_pw.js", "/frontend-assets/index-CxO9y2oK.js", "/frontend-assets/use-in-view-DEgJ9Ptc.js", "/frontend-assets/_commonjs-dynamic-modules-DYt-0jxj.js", "/frontend-assets/index-D25pJ9J9.js", "/frontend-assets/avatar-menu-BukIyJn1.js", "/frontend-assets/index-VObRRx9T.js", "/frontend-assets/index-C99v8T86.js", "/frontend-assets/tooltip-provider-Bpokk5PD.js", "/frontend-assets/string-mMrxK_zr.js", "/frontend-assets/4CMBR7SL-BalhXzPm.js", "/frontend-assets/logo-glyph-J3lYGwuy.js" ], "css": [ "/frontend-assets/index-Ds7p1bbX.css" ] }, "routes/blog/layout": { "id": "routes/blog/layout", "parentId": "root", "path": "blog", "hasAction": false, "hasLoader": true, "hasClientAction": false, "hasClientLoader": false, "hasErrorBoundary": false, "module": "/assets/layout-JZqblKiQ.js", "imports": [ "/frontend-assets/with-props-Brf4Fy-b.js", "/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js", "/frontend-assets/index-Dz6kh0GH.js", "/frontend-assets/root-8eZD8nXZ.js", "/assets/redirect-DfbXaSx1.js", "/frontend-assets/index-D25pJ9J9.js", "/frontend-assets/avatar-menu-BukIyJn1.js", "/frontend-assets/IconBase-aPY_BkEt.js", "/frontend-assets/index-VObRRx9T.js", "/frontend-assets/index-C99v8T86.js", "/frontend-assets/index-C39VWG4p.js", "/frontend-assets/tooltip-provider-Bpokk5PD.js", "/frontend-assets/index-DkjZEnzH.js", "/frontend-assets/string-mMrxK_zr.js", "/frontend-assets/4CMBR7SL-BalhXzPm.js", "/frontend-assets/index-DCRUtxEn.js", "/frontend-assets/logo-glyph-J3lYGwuy.js" ], "css": [] }, "routes/blog/detail": { "id": "routes/blog/detail", "parentId": "routes/blog/layout", "hasAction": false, "hasLoader": true, "hasClientAction": false, "hasClientLoader": false, "hasErrorBoundary": false, "module": "/assets/detail-C9MooHAM.js", "imports": [ "/frontend-assets/with-props-Brf4Fy-b.js", 
"/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js", "/frontend-assets/index-BKyjhPpS.js", "/frontend-assets/index-C0qJxpfZ.js", "/assets/redirect-DfbXaSx1.js", "/frontend-assets/constants-BU-Voyre.js" ], "css": [] }, "content/blog/fine-tune-llama-2": { "id": "content/blog/fine-tune-llama-2", "parentId": "routes/blog/detail", "path": "fine-tune-llama-2", "hasAction": false, "hasLoader": false, "hasClientAction": false, "hasClientLoader": false, "hasErrorBoundary": false, "module": "/assets/fine-tune-llama-2-Df6dXqkW.js", "imports": [ "/frontend-assets/with-props-Brf4Fy-b.js", "/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js", "/frontend-assets/blog-CNe9F6VR.js", "/frontend-assets/index-ChvNWze6.js", "/assets/redirect-DfbXaSx1.js", "/frontend-assets/feed-eMWEd8FT.js", "/frontend-assets/index-DpchfCeG.js" ], "css": [] }, "routes/home": { "id": "routes/home", "parentId": "root", "index": true, "hasAction": false, "hasLoader": false, "hasClientAction": false, "hasClientLoader": false, "hasErrorBoundary": false, "module": "/assets/home-DZRtxAC1.js", "imports": [ "/frontend-assets/with-props-Brf4Fy-b.js", "/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js", "/frontend-assets/index-DCRUtxEn.js", "/frontend-assets/index-VObRRx9T.js", "/assets/redirect-DfbXaSx1.js", "/frontend-assets/tooltip-provider-Bpokk5PD.js", "/frontend-assets/bash-BrBG-s_R.js", "/frontend-assets/bg-goo-BSf1KuIl.js", "/frontend-assets/tab-provider-CC0CSrnq.js", "/frontend-assets/index-C26Mwjem.js", "/frontend-assets/use-reduced-motion-DtGVGoV5.js", "/frontend-assets/index-CxO9y2oK.js", "/frontend-assets/useRequest-Jxg2S_l5.js", "/frontend-assets/client-only-3gf2CKHz.js", "/frontend-assets/proxy-kx7hH_pw.js", "/frontend-assets/avatar-menu-BukIyJn1.js", "/frontend-assets/index-C99v8T86.js", "/frontend-assets/index-DUXAY9fp.js", "/frontend-assets/ArrowRight-K7Ojub1A.js", "/frontend-assets/IconBase-aPY_BkEt.js", "/frontend-assets/4CMBR7SL-BalhXzPm.js", "/frontend-assets/index-RIBYm-Q5.js", 
"/frontend-assets/root-8eZD8nXZ.js", "/frontend-assets/logo-glyph-J3lYGwuy.js", "/frontend-assets/use-in-view-DEgJ9Ptc.js", "/frontend-assets/isBrowser-C5DMOrl5.js", "/frontend-assets/debounce-CbIcyUWR.js", "/frontend-assets/index-DkjZEnzH.js", "/frontend-assets/string-mMrxK_zr.js", "/frontend-assets/index-C39VWG4p.js", "/frontend-assets/index-D25pJ9J9.js" ], "css": [ "/frontend-assets/home-D5dMP9LA.css" ] }, "routes/blog/index": { "id": "routes/blog/index", "parentId": "routes/blog/layout", "index": true, "hasAction": false, "hasLoader": false, "hasClientAction": false, "hasClientLoader": false, "hasErrorBoundary": false, "module": "/assets/index-BlksMu6e.js", "imports": [ "/frontend-assets/with-props-Brf4Fy-b.js", "/frontend-assets/chunk-HA7DTUK3-BVDp7VCB.js", "/frontend-assets/Rss-DYE0fTK3.js", "/frontend-assets/index-VObRRx9T.js", "/frontend-assets/index-BKyjhPpS.js", "/frontend-assets/logo-glyph-J3lYGwuy.js", "/frontend-assets/og-image-ARUK3wQt.js", "/assets/redirect-DfbXaSx1.js", "/frontend-assets/IconBase-aPY_BkEt.js", "/frontend-assets/index-C99v8T86.js", "/frontend-assets/index-C39VWG4p.js" ], "css": [] } }, "url": "/frontend-assets/manifest-53943689.js", "version": "53943689" }; window.__reactRouterRouteModules = {"root":route0,"routes/blog/layout":route1,"routes/blog/detail":route2,"content/blog/fine-tune-llama-2":route3}; import("/assets/entry.client-BwK8caee.js");</script><div id="_rht_toaster" style="position:fixed;z-index:9999;top:16px;left:16px;right:16px;bottom:16px;pointer-events:none"></div><script>((storageKey2, restoreKey) => { if (!window.history.state || !window.history.state.key) { let key = Math.random().toString(32).slice(2); window.history.replaceState({ key }, ""); } try { let positions = JSON.parse(sessionStorage.getItem(storageKey2) || "{}"); let storedY = positions[restoreKey || window.history.state.key]; if (typeof storedY === "number") { window.scrollTo(0, storedY); } } catch (error) { console.error(error); 
sessionStorage.removeItem(storageKey2); } })("react-router-scroll-positions", null)</script></body></html><!--$?--><template id="B:0"></template><!--/$--><div hidden id="S:0"><script>window.__reactRouterContext.streamController.enqueue("[{\"_1\":2,\"_702\":-5,\"_703\":-5},\"loaderData\",{\"_3\":4,\"_24\":25,\"_699\":700,\"_701\":-5},\"root\",{\"_5\":-5,\"_6\":-5,\"_7\":8,\"_15\":16},\"theme\",\"user\",\"preferences\",{\"_9\":10,\"_11\":12,\"_13\":14},\"DOCS_SIDEBAR\",\"DOCS_SIDEBAR_EXPANDED\",\"PREDICTIONS_SIDEBAR\",\"EXPANDED\",\"DOCS_LANGUAGE\",\"\",\"env\",{\"_17\":-7,\"_18\":19,\"_20\":21,\"_22\":23},\"REPLICATE_API_BASE_URL\",\"REPLICATE_WEB_URL\",\"https://replicate.com\",\"RUDDERSTACK_DATA_PLANE_URL\",\"https://replicateor.dataplane.rudderstack.com\",\"RUDDERSTACK_WRITE_KEY\",\"2jo1cJ94MtaK9LTOXN6DyxpBmNA\",\"routes/blog/layout\",{\"_26\":27},\"posts\",[28,49,57,66,74,82,90,99,108,114,122,130,138,146,154,161,168,176,183,191,196,204,211,219,226,233,240,248,254,262,267,273,281,288,295,302,309,316,324,331,339,347,353,361,370,380,388,397,405,413,421,430,439,447,455,463,471,479,485,495,504,512,520,528,536,544,552,560,568,575,582,590,598,606,614,622,630,638,646,655,663,671,678,685,692],{\"_29\":30,\"_31\":32,\"_33\":34,\"_35\":36,\"_37\":38,\"_43\":44,\"_45\":46,\"_47\":48},\"slug\",\"fine-tune-video\",\"title\",\"You can now fine-tune open-source video models\",\"intro\",\"Train your own versions of Tencent's HunyuanVideo for style, motion, and characters on Replicate.\",\"image\",\"assets/blog/fine-tune-video/fine-tune-video-cover.png\",\"authors\",[39,40,41,42],\"zsxkib\",\"zeke\",\"deepfates\",\"bfirsh\",\"publishedAt\",\"2025-01-24T00:00:00.000Z\",\"publishedAtDisplay\",\"January 24, 2025\",\"unlisted\",false,{\"_29\":50,\"_31\":51,\"_33\":52,\"_35\":53,\"_37\":54,\"_43\":55,\"_45\":56,\"_47\":48},\"generate-videos-with-playground\",\"Generate short videos with the Replicate playground\",\"Create AI videos with a convenient 
workflow.\",\"assets/blog/generate-videos-with-playground/generate-videos-cover.webp\",[41],\"2025-01-17T00:00:00.000Z\",\"January 17, 2025\",{\"_29\":58,\"_31\":59,\"_33\":60,\"_35\":61,\"_37\":62,\"_43\":64,\"_45\":65,\"_47\":48},\"ai-video-is-having-its-stable-diffusion-moment\",\"AI video is having its Stable Diffusion moment\",\"There are lots of models that are as good as OpenAI's Sora now.\",\"assets/blog/ai-video/ai-video-cover.webp\",[63],\"fofr\",\"2024-12-16\",\"December 16, 2024\",{\"_29\":67,\"_31\":68,\"_33\":69,\"_35\":70,\"_37\":71,\"_43\":72,\"_45\":73,\"_47\":48},\"fast-flux-fine-tunes\",\"FLUX fine-tunes are now fast\",\"We've made running fine-tunes on Replicate much faster, and the optimizations are open-source.\",\"assets/blog/fast-flux-fine-tunes/out-0-11.webp\",[42],\"2024-11-26T00:00:00.000Z\",\"November 26, 2024\",{\"_29\":75,\"_31\":76,\"_33\":77,\"_35\":78,\"_37\":79,\"_43\":80,\"_45\":81,\"_47\":48},\"flux-tools\",\"FLUX.1 Tools – Control and steerability for FLUX\",\"A new set of image generation capabilities for FLUX models, including inpainting, outpainting, canny edge detection, and depth maps.\",\"assets/blog/flux-tools/cover.png\",[40,63],\"2024-11-21T00:00:00.000Z\",\"November 21, 2024\",{\"_29\":83,\"_31\":84,\"_33\":85,\"_35\":86,\"_37\":87,\"_43\":88,\"_45\":89,\"_47\":48},\"nvidia-l40s-gpus-are-here\",\"NVIDIA L40S GPUs are here\",\"NVIDIA L40S GPUs are here, with better performance and lower cost.\",\"assets/blog/nvidia-l40s-gpus-are-here/cover.png\",[40],\"2024-11-15T00:00:00.000Z\",\"November 15, 2024\",{\"_29\":91,\"_31\":92,\"_33\":93,\"_35\":-7,\"_37\":94,\"_43\":97,\"_45\":98,\"_47\":48},\"data-urls-in-our-sync-api\",\"We messed up: data URLs in our sync API\",\"We've decided to stop returning data URLs in sync API outputs based on feedback from users. 
We're going to take a few steps back and figure out the best way to get your model output as fast as possible.\",[95,96],\"nickstenning\",\"evilstreak\",\"2024-10-31T00:00:00.000Z\",\"October 31, 2024\",{\"_29\":100,\"_31\":101,\"_33\":102,\"_35\":103,\"_37\":104,\"_43\":106,\"_45\":107,\"_47\":48},\"ideogram-v2-inpainting\",\"Ideogram v2 is an outstanding new inpainting model\",\"We've partnered with Ideogram to bring their inpainting model to Replicate's API.\",\"assets/blog/ideogram-v2-inpainting/cover.png\",[105],\"andreasjansson\",\"2024-10-22T00:00:00.000Z\",\"October 22, 2024\",{\"_29\":109,\"_31\":110,\"_33\":111,\"_35\":112,\"_37\":113,\"_43\":106,\"_45\":107,\"_47\":48},\"stable-diffusion-3-5-is-here\",\"Stable Diffusion 3.5 is here\",\"Stability AI's latest text-to-image model is now available on Replicate and you can run it with an API.\",\"assets/blog/stable-diffusion-3-5-is-here/cover.webp\",[41],{\"_29\":115,\"_31\":116,\"_33\":117,\"_35\":118,\"_37\":119,\"_43\":120,\"_45\":121,\"_47\":48},\"flux-is-fast-and-open-source\",\"FLUX is fast and it's open source\",\"FLUX is now much faster on Replicate, and we’ve made our optimizations open-source so you can see exactly how they work and build upon them.\",\"assets/blog/flux-is-fast/cover.png\",[42],\"2024-10-10T00:00:00.000Z\",\"October 10, 2024\",{\"_29\":123,\"_31\":124,\"_33\":125,\"_35\":126,\"_37\":127,\"_43\":128,\"_45\":129,\"_47\":48},\"flux-1-1-pro-is-here\",\"FLUX1.1 [pro] is here\",\"Black Forest Labs continue to push boundaries with their latest release of FLUX.1 image generation model.\",\"assets/blog/flux-1-1-pro-is-here/cover.webp\",[40],\"2024-10-03T00:00:00.000Z\",\"October 3, 2024\",{\"_29\":131,\"_31\":132,\"_33\":133,\"_35\":134,\"_37\":135,\"_43\":136,\"_45\":137,\"_47\":48},\"using-synthetic-data-to-improve-flux-finetunes\",\"Using synthetic training data to improve Flux finetunes\",\"It's easy to fine-tune Flux, but sometimes you need to do a little more work to get the best 
results. This post covers techniques you can use to improve your fine-tuned Flux models.\",\"assets/blog/using-synthetic-data-to-improve-flux-finetunes/cover.jpg\",[40],\"2024-09-20T00:00:00.000Z\",\"September 20, 2024\",{\"_29\":139,\"_31\":140,\"_33\":141,\"_35\":142,\"_37\":143,\"_43\":144,\"_45\":145,\"_47\":48},\"fine-tune-flux-with-an-api\",\"Fine-tune FLUX.1 with an API\",\"Create and run your own fine-tuned Flux models programmatically using Replicate's HTTP API.\",\"assets/blog/fine-tune-flux-with-an-api/cover.jpg\",[40],\"2024-09-09T00:00:00.000Z\",\"September 9, 2024\",{\"_29\":147,\"_31\":148,\"_33\":149,\"_35\":150,\"_37\":151,\"_43\":152,\"_45\":153,\"_47\":48},\"fine-tune-flux-with-faces\",\"Fine-tune FLUX.1 to create images of yourself\",\"Create your own fine-tuned Flux model to generate new images of yourself.\",\"assets/blog/fine-tune-flux-with-faces/cover.jpg\",[40],\"2024-08-30T00:00:00.000Z\",\"August 30, 2024\",{\"_29\":155,\"_31\":156,\"_33\":157,\"_35\":-7,\"_37\":158,\"_43\":159,\"_45\":160,\"_47\":48},\"replicate-intelligence-2024-08-23\",\"Replicate Intelligence #12\",\"Flux LoRAs, Hot Zuck, and Replicate on Lex Fridman\",[41],\"2024-08-23T00:00:00.000Z\",\"August 23, 2024\",{\"_29\":162,\"_31\":163,\"_33\":164,\"_35\":-7,\"_37\":165,\"_43\":166,\"_45\":167,\"_47\":48},\"replicate-intelligence-2024-08-16\",\"Replicate Intelligence #11\",\"Fine tune FLUX.1, generative video games, a vision for the metaverse\",[41],\"2024-08-16T00:00:00.000Z\",\"August 16, 2024\",{\"_29\":169,\"_31\":170,\"_33\":171,\"_35\":172,\"_37\":173,\"_43\":174,\"_45\":175,\"_47\":48},\"fine-tune-flux\",\"Fine-tune FLUX.1 with your own images\",\"We've added fine-tuning (LoRA) support to FLUX.1 image generation models. 
You can train FLUX.1 on your own images with one line of code using Replicate's API.\",\"assets/blog/fine-tune-flux/cover.jpg\",[41],\"2024-08-15T00:00:00.000Z\",\"August 15, 2024\",{\"_29\":177,\"_31\":178,\"_33\":179,\"_35\":-7,\"_37\":180,\"_43\":181,\"_45\":182,\"_47\":48},\"replicate-intelligence-2024-08-09\",\"Replicate Intelligence #10\",\"Flux developments, Minecraft bot, Streamlit cookbook with Zeke\",[41],\"2024-08-09T00:00:00.000Z\",\"August 9, 2024\",{\"_29\":184,\"_31\":185,\"_33\":186,\"_35\":187,\"_37\":188,\"_43\":189,\"_45\":190,\"_47\":48},\"flux-first-impressions\",\"FLUX.1: First Impressions\",\"We explore FLUX.1's unique strengths and aesthetics to see what we can generate.\",\"assets/blog/flux-first-impressions/library.webp\",[41],\"2024-08-02T00:00:00.000Z\",\"August 2, 2024\",{\"_29\":192,\"_31\":193,\"_33\":194,\"_35\":-7,\"_37\":195,\"_43\":189,\"_45\":190,\"_47\":48},\"replicate-intelligence-2024-08-02\",\"Replicate Intelligence #9\",\"Open source frontier image model, cut objects from videos, new Python web framework from Jeremy Howard\",[41],{\"_29\":197,\"_31\":198,\"_33\":199,\"_35\":200,\"_37\":201,\"_43\":202,\"_45\":203,\"_47\":48},\"flux-state-of-the-art-image-generation\",\"Run FLUX with an API\",\"FLUX.1 is a new text-to-image model from Black Forest Labs, the creators of Stable Diffusion, that exceeds the capabilities of previous open-source models.\",\"assets/blog/flux/cover.jpg\",[40,39],\"2024-08-01T00:00:00.000Z\",\"August 1, 2024\",{\"_29\":205,\"_31\":206,\"_33\":207,\"_35\":-7,\"_37\":208,\"_43\":209,\"_45\":210,\"_47\":48},\"replicate-intelligence-2024-07-26\",\"Replicate Intelligence #8\",\"A top-tier open-ish language model, new safety classifiers, model search API\",[41],\"2024-07-26T00:00:00.000Z\",\"July 26, 2024\",{\"_29\":212,\"_31\":213,\"_33\":214,\"_35\":215,\"_37\":216,\"_43\":217,\"_45\":218,\"_47\":48},\"run-llama-3-1-with-an-api\",\"Run Meta Llama 3.1 405B with an API\",\"Llama 3.1 405B: is the most 
powerful open-source language model from Meta. Learn how to run it in the cloud with one line of code.\",\"assets/blog/llama-3-api/meta.png\",[41],\"2024-07-23T00:00:00.000Z\",\"July 23, 2024\",{\"_29\":220,\"_31\":221,\"_33\":222,\"_35\":-7,\"_37\":223,\"_43\":224,\"_45\":225,\"_47\":48},\"replicate-intelligence-2024-07-12\",\"Replicate Intelligence #7\",\"Data curation, data generation, data data data\",[41],\"2024-07-12T00:00:00.000Z\",\"July 12, 2024\",{\"_29\":227,\"_31\":228,\"_33\":229,\"_35\":-7,\"_37\":230,\"_43\":231,\"_45\":232,\"_47\":48},\"replicate-intelligence-2024-06-28\",\"Replicate Intelligence #6\",\"Google's Gemma2 models, language model leaderboard, tips for Stable Diffusion 3\",[41],\"2024-06-28T00:00:00.000Z\",\"June 28, 2024\",{\"_29\":234,\"_31\":235,\"_33\":236,\"_35\":-7,\"_37\":237,\"_43\":238,\"_45\":239,\"_47\":48},\"replicate-intelligence-2024-06-21\",\"Replicate Intelligence #5\",\"Really good coding model, AI search breakthroughs, Discord support bot\",[41],\"2024-06-21T00:00:00.000Z\",\"June 21, 2024\",{\"_29\":241,\"_31\":242,\"_33\":243,\"_35\":244,\"_37\":245,\"_43\":246,\"_45\":247,\"_47\":48},\"get-the-best-from-stable-diffusion-3\",\"How to get the best results from Stable Diffusion 3\",\"We show you how to use Stable Diffusion 3 to get the best images, including new techniques for prompting.\",\"assets/blog/get-the-best-from-stable-diffusion-3/get-the-best-from-stable-diffusion-3.webp\",[63],\"2024-06-18T00:00:00.000Z\",\"June 18, 2024\",{\"_29\":249,\"_31\":250,\"_33\":251,\"_35\":252,\"_37\":253,\"_43\":246,\"_45\":247,\"_47\":48},\"run-stable-diffusion-3-on-apple-silicon-mac\",\"Run Stable Diffusion 3 on your Apple Silicon Mac\",\"A step-by-step guide to generating images with Stable Diffusion 3 on your M-series Mac using MPS acceleration.\",\"assets/blog/sd3-mac-mps/cover.webp\",[39],{\"_29\":255,\"_31\":256,\"_33\":257,\"_35\":258,\"_37\":259,\"_43\":260,\"_45\":261,\"_47\":48},\"push-a-custom-sd3\",\"Push a custom 
version of Stable Diffusion 3\",\"Create your own custom version of Stability's latest image generation model and run it on Replicate via the web or API.\",\"assets/blog/push-a-custom-sd3/push-a-custom-sd3.webp\",[40],\"2024-06-14T00:00:00.000Z\",\"June 14, 2024\",{\"_29\":263,\"_31\":264,\"_33\":265,\"_35\":-7,\"_37\":266,\"_43\":260,\"_45\":261,\"_47\":48},\"replicate-intelligence-2024-06-14\",\"Replicate Intelligence #4\",\"Find concepts in GPT models, real-time speech to text in the browser, H100s are coming\",[41],{\"_29\":268,\"_31\":269,\"_33\":270,\"_35\":271,\"_37\":272,\"_43\":260,\"_45\":261,\"_47\":48},\"run-sd3-on-comfyui\",\"Run Stable Diffusion 3 on your own machine with ComfyUI\",\"Copy and paste a few commands into terminal to play with Stable Diffusion 3 on your own GPU-powered machine.\",\"assets/blog/sd3-comfyui-steps/comfyui-sd3.webp\",[39,40],{\"_29\":274,\"_31\":275,\"_33\":276,\"_35\":277,\"_37\":278,\"_43\":279,\"_45\":280,\"_47\":48},\"h100s-are-coming\",\"H100s are coming to Replicate\",\"We'll soon support NVIDIA's H100 GPUs for predictions and training. Let us know if you want early access.\",\"assets/blog/h100s-are-coming/h100s-are-coming.webp\",[40],\"2024-06-12T00:00:00.000Z\",\"June 12, 2024\",{\"_29\":282,\"_31\":283,\"_33\":284,\"_35\":285,\"_37\":286,\"_43\":279,\"_45\":280,\"_47\":48},\"run-stable-diffusion-3-with-an-api\",\"Run Stable Diffusion 3 with an API\",\"Stable Diffusion 3 is the latest text-to-image model from Stability, with improved image quality, typography, prompt understanding, and resource efficiency. 
Learn how to run it in the cloud with one line of code.\",\"assets/blog/stable-diffusion-3-api/stability-ai.webp\",[287],\"cbh123\",{\"_29\":289,\"_31\":290,\"_33\":291,\"_35\":-7,\"_37\":292,\"_43\":293,\"_45\":294,\"_47\":48},\"replicate-intelligence-2024-06-07\",\"Replicate Intelligence #3\",\"Garden State Llama, applied LLMs guide, real-time image generation\",[41],\"2024-06-07T00:00:00.000Z\",\"June 7, 2024\",{\"_29\":296,\"_31\":297,\"_33\":298,\"_35\":-7,\"_37\":299,\"_43\":300,\"_45\":301,\"_47\":48},\"replicate-intelligence-2024-05-31\",\"Replicate Intelligence #2\",\"Faster image generation, AI-powered world simulator, insights on AI dataset complexity\",[41],\"2024-05-31T00:00:00.000Z\",\"May 31, 2024\",{\"_29\":303,\"_31\":304,\"_33\":305,\"_35\":-7,\"_37\":306,\"_43\":307,\"_45\":308,\"_47\":48},\"replicate-intelligence-2024-05-24\",\"Replicate Intelligence #1\",\"DIY Llama 3 implementation, open-source smart glasses, steering language models with dictionary learning\",[41],\"2024-05-24T00:00:00.000Z\",\"May 24, 2024\",{\"_29\":310,\"_31\":311,\"_33\":-7,\"_35\":-7,\"_37\":312,\"_43\":314,\"_45\":315,\"_47\":48},\"shared-network-vulnerability-disclosure\",\"Shared network vulnerability disclosure\",[95,313,40],\"philandstuff\",\"2024-05-23T00:00:00.000Z\",\"May 23, 2024\",{\"_29\":317,\"_31\":318,\"_33\":319,\"_35\":320,\"_37\":321,\"_43\":322,\"_45\":323,\"_47\":48},\"run-arctic-with-an-api\",\"Run Snowflake Arctic with an API\",\"Arctic is a new open-source language model from Snowflake. Learn how to run it in the cloud with one line of code.\",\"assets/blog/arctic-api/snowflake_arctic.png\",[287],\"2024-04-23T00:00:00.000Z\",\"April 23, 2024\",{\"_29\":325,\"_31\":326,\"_33\":327,\"_35\":215,\"_37\":328,\"_43\":329,\"_45\":330,\"_47\":48},\"run-llama-3-with-an-api\",\"Run Meta Llama 3 with an API\",\"Llama 3 is the latest language model from Meta. 
Learn how to run it in the cloud with one line of code.\",[287],\"2024-04-18T00:00:00.000Z\",\"April 18, 2024\",{\"_29\":332,\"_31\":333,\"_33\":334,\"_35\":335,\"_37\":336,\"_43\":337,\"_45\":338,\"_47\":48},\"run-codellama-with-an-api\",\"Run Code Llama 70B with an API\",\"Code Llama 70B is one of the powerful open-source code generation models. Learn how to run it in the cloud with one line of code.\",\"assets/blog/codellama/codellama.png\",[287],\"2024-01-30T00:00:00.000Z\",\"January 30, 2024\",{\"_29\":340,\"_31\":341,\"_33\":342,\"_35\":343,\"_37\":344,\"_43\":345,\"_45\":346,\"_47\":48},\"how-to-create-an-ai-narrator\",\"How to create an AI narrator for your life\",\"Or, how I met a virtual David Attenborough.\",\"assets/blog/ai-narrator/2023-12-01_at_11.58.52.png\",[287],\"2023-12-06T00:00:00.000Z\",\"December 6, 2023\",{\"_29\":348,\"_31\":349,\"_33\":350,\"_35\":351,\"_37\":352,\"_43\":345,\"_45\":346,\"_47\":48},\"how-to-tune-a-realistic-voice-clone\",\"Clone your voice using open-source models\",\"We’ve added fine-tuning for realistic voice cloning (RVC). You can train RVC on your own dataset from a YouTube video with a few lines of code using Replicate's API.\",\"assets/blog/how-to-tune-a-realistic-voice-clone/rvc.jpg\",[39,63],{\"_29\":354,\"_31\":355,\"_33\":356,\"_35\":357,\"_37\":358,\"_43\":359,\"_45\":360,\"_47\":48},\"series-b\",\"Businesses are building on open-source AI\",\"We've raised a $40 million Series B led by a16z.\",\"assets/blog/series-b/goo-logo.jpg\",[42],\"2023-12-05T00:00:00.000Z\",\"December 5, 2023\",{\"_29\":362,\"_31\":363,\"_33\":364,\"_35\":365,\"_37\":366,\"_43\":368,\"_45\":369,\"_47\":48},\"run-yi-chat-with-api\",\"How to run Yi chat models with an API\",\"The Yi series models are large language models trained from scratch by developers at 01.AI. 
Learn how to run them in the cloud with one line of code.\",\"assets/blog/run-yi-chat-with-api/Yi.svg\",[367],\"nateraw\",\"2023-11-23T00:00:00.000Z\",\"November 23, 2023\",{\"_29\":371,\"_31\":372,\"_33\":373,\"_35\":374,\"_37\":375,\"_43\":378,\"_45\":379,\"_47\":48},\"replicate-scaffold\",\"Scaffold Replicate apps with one command\",\"We've added a CLI command that makes it easy to get started with Replicate.\",\"assets/blog/ready-set-scaffold/scaffold-llama.jpg\",[376,377],\"jakedahn\",\"mattrothenberg\",\"2023-11-22T00:00:00.000Z\",\"November 22, 2023\",{\"_29\":381,\"_31\":382,\"_33\":383,\"_35\":384,\"_37\":385,\"_43\":386,\"_45\":387,\"_47\":48},\"run-bge-embedding-models\",\"Using open-source models for faster and cheaper text embeddings\",\"An interactive example showing how to embed text using a state-of-the-art embedding model that beats OpenAI's embeddings API on price and performance.\",\"assets/blog/run-bge-embedding-models/cover.jpg\",[367],\"2023-11-10T00:00:00.000Z\",\"November 10, 2023\",{\"_29\":389,\"_31\":390,\"_33\":391,\"_35\":392,\"_37\":393,\"_43\":395,\"_45\":396,\"_47\":48},\"generate-music-from-chord-progressions-musicgen-chord\",\"Generate music from chord progressions and text prompts with MusicGen-Chord\",\"We’ve added chord conditioning to Meta’s MusicGen model, so you can create automatic backing tracks in any style using text prompts and chord progressions.\",\"assets/blog/generate-music-from-chord-progressions-musicgen-chord/cover.png\",[394],\"sakemin\",\"2023-11-08T00:00:00.000Z\",\"November 8, 2023\",{\"_29\":398,\"_31\":399,\"_33\":400,\"_35\":401,\"_37\":402,\"_43\":403,\"_45\":404,\"_47\":48},\"run-latent-consistency-model-on-mac\",\"Generate images in one second on your Mac using a latent consistency model\",\"How to run a latent consistency model on your M1 or M2 Mac\",\"assets/blog/run-latent-consistency-model-on-mac/cover.webp\",[63],\"2023-10-25T00:00:00.000Z\",\"October 25, 
2023\",{\"_29\":406,\"_31\":407,\"_33\":408,\"_35\":409,\"_37\":410,\"_43\":411,\"_45\":412,\"_47\":48},\"how-to-use-rag-with-chromadb-and-mistral-7b-instruct\",\"How to use retrieval augmented generation with ChromaDB and Mistral\",\"In this post we'll explore the basics of retrieval augmented generation by creating an example app that uses bge-large-en for embeddings, ChromaDB for vector store, and mistral-7b-instruct for language model generation.\",\"assets/blog/how-to-use-rag-with-chromadb-and-mistral-7b-instruct/rag-cover-image.webp\",[376],\"2023-10-17T00:00:00.000Z\",\"October 17, 2023\",{\"_29\":414,\"_31\":415,\"_33\":416,\"_35\":417,\"_37\":418,\"_43\":419,\"_45\":420,\"_47\":48},\"fine-tune-musicgen\",\"Fine-tune MusicGen to generate music in any style\",\"We’ve added fine-tuning support to MusicGen. You can train the small, medium and melody models on your own audio files using Replicate.\",\"assets/blog/fine-tune-musicgen/fine-tune-musicgen.webp\",[63,394],\"2023-10-13T00:00:00.000Z\",\"October 13, 2023\",{\"_29\":422,\"_31\":423,\"_33\":424,\"_35\":425,\"_37\":426,\"_43\":428,\"_45\":429,\"_47\":48},\"llama-2-grammars\",\"Jet-setting with Llama 2 + Grammars\",\"How to use Llama 2 models with grammars for information extraction tasks.\",\"assets/blog/llama-2-grammars/llama2-grammar.webp\",[427],\"mattt\",\"2023-10-09T00:00:00.000Z\",\"October 9, 2023\",{\"_29\":431,\"_31\":432,\"_33\":433,\"_35\":434,\"_37\":435,\"_43\":437,\"_45\":438,\"_47\":48},\"run-mistral-7b-with-api\",\"How to run Mistral 7B with an API\",\"Mistral 7B is an open-source large language model. 
Learn what it's good at and how to run it in the cloud with one line of code.\",\"assets/blog/demystifying-mistral-7b/mistral_box.jpeg\",[436,40],\"daanelson\",\"2023-10-06T00:00:00.000Z\",\"October 6, 2023\",{\"_29\":440,\"_31\":441,\"_33\":442,\"_35\":443,\"_37\":444,\"_43\":445,\"_45\":446,\"_47\":48},\"animatediff-interpolator\",\"Make smooth AI generated videos with AnimateDiff and an interpolator\",\"Combine AnimateDiff and the ST-MFNet frame interpolator to create smooth and realistic videos from a text prompt\",\"assets/blog/animatediff-interpolator/animatediff.webp\",[63,39],\"2023-10-04T00:00:00.000Z\",\"October 4, 2023\",{\"_29\":448,\"_31\":449,\"_33\":450,\"_35\":451,\"_37\":452,\"_43\":453,\"_45\":454,\"_47\":48},\"fine-tune-cold-boots\",\"Fine-tuned models now boot in less than one second\",\"We've made some dramatic improvements to cold boots for fine-tuned models.\",\"assets/blog/fine-tune-cold-boots/fast-llamas.webp\",[105],\"2023-09-06T00:00:00.000Z\",\"September 6, 2023\",{\"_29\":456,\"_31\":457,\"_33\":458,\"_35\":459,\"_37\":460,\"_43\":461,\"_45\":462,\"_47\":48},\"painting-with-words-a-history-of-text-to-image-ai\",\"Painting with words: a history of text-to-image AI\",\"With the recent release of Stable Diffusion XL fine-tuning on Replicate, and today being the 1-year anniversary of Stable Diffusion, now feels like the perfect opportunity to take a step back and reflect on how text-to-image AI has improved over the last few years.\",\"assets/blog/painting-with-words-a-history-of-text-to-image-ai/sketch_of_internet_is_a_series_of_tubes_by_Leonardo_da_Vinci.png\",[376],\"2023-08-22T00:00:00.000Z\",\"August 22, 2023\",{\"_29\":464,\"_31\":465,\"_33\":466,\"_35\":467,\"_37\":468,\"_43\":469,\"_45\":470,\"_47\":48},\"cutting-prices-in-half\",\"We're cutting our prices in half\",\"The price of public models is being cut in half, and soon we'll start charging new users for setup and idle time on private 
models.\",\"assets/blog/cutting-prices-in-half/out-3-1.png\",[42],\"2023-08-16T00:00:00.000Z\",\"August 16, 2023\",{\"_29\":472,\"_31\":473,\"_33\":474,\"_35\":475,\"_37\":476,\"_43\":477,\"_45\":478,\"_47\":48},\"how-to-prompt-llama\",\"A guide to prompting Llama 2\",\"Learn the art of the Llama prompt.\",\"assets/blog/prompt-llama/llama-typing.png\",[287],\"2023-08-14T00:00:00.000Z\",\"August 14, 2023\",{\"_29\":480,\"_31\":481,\"_33\":482,\"_35\":483,\"_37\":484,\"_43\":477,\"_45\":478,\"_47\":48},\"streaming\",\"Streaming output for language models\",\"Our API now supports server-sent event streams for language models. Learn how to use them to make your apps more responsive.\",\"assets/blog/streaming/streaming.jpg\",[40],{\"_29\":486,\"_31\":487,\"_33\":488,\"_35\":489,\"_37\":490,\"_43\":493,\"_45\":494,\"_47\":48},\"fine-tune-sdxl\",\"Fine-tune SDXL with your own images\",\"We’ve added fine-tuning (Dreambooth, Textual Inversion and LoRA) support to SDXL 1.0. You can train SDXL on your own images with one line of code using the Replicate API.\",\"assets/blog/fine-tune-sdxl/out-3-8.png\",[105,491,492,436],\"anotherjesse\",\"cloneofsimo\",\"2023-08-08T00:00:00.000Z\",\"August 8, 2023\",{\"_29\":496,\"_31\":497,\"_33\":498,\"_35\":499,\"_37\":500,\"_43\":502,\"_45\":503,\"_47\":48},\"run-llama-2-with-an-api\",\"Run Llama 2 with an API\",\"Llama 2 is the first open source language model of the same caliber as OpenAI’s models. 
Learn how to run it in the cloud with one line of code.\",\"assets/blog/llama-api/llama-clouds.png\",[501],\"joehoover\",\"2023-07-27T00:00:00.000Z\",\"July 27, 2023\",{\"_29\":505,\"_31\":506,\"_33\":507,\"_35\":508,\"_37\":509,\"_43\":510,\"_45\":511,\"_47\":48},\"run-sdxl-with-an-api\",\"Run SDXL with an API\",\"How to run Stable Diffusion XL 1.0 using the Replicate API\",\"assets/blog/run-sdxl-with-an-api/astronaut-riding-unicorn.webp\",[63],\"2023-07-26T00:00:00.000Z\",\"July 26, 2023\",{\"_29\":513,\"_31\":514,\"_33\":515,\"_35\":516,\"_37\":517,\"_43\":518,\"_45\":519,\"_47\":48},\"run-llama-locally\",\"A comprehensive guide to running Llama 2 locally\",\"How to run Llama 2 on Mac, Linux, Windows, and your phone.\",\"assets/blog/run-llama-locally/llama-captain.jpg\",[40],\"2023-07-22T00:00:00.000Z\",\"July 22, 2023\",{\"_29\":521,\"_31\":522,\"_33\":523,\"_35\":524,\"_37\":525,\"_43\":526,\"_45\":527,\"_47\":48},\"fine-tune-llama-2\",\"Fine-tune Llama 2 on Replicate\",\"So you want to train a llama...\",\"assets/blog/fine-tune-llama-2/llama-salon.png\",[287],\"2023-07-20T00:00:00.000Z\",\"July 20, 2023\",{\"_29\":529,\"_31\":530,\"_33\":531,\"_35\":532,\"_37\":533,\"_43\":534,\"_45\":535,\"_47\":48},\"llama-2-roundup\",\"What happened with Llama 2 in the last 24 hours? 
🦙\",\"A roundup of recent developments from the llamaverse following the second major release of Meta's open-source large language model.\",\"assets/blog/llama-2-roundup/llama2.jpg\",[287,40],\"2023-07-19T00:00:00.000Z\",\"July 19, 2023\",{\"_29\":537,\"_31\":538,\"_33\":539,\"_35\":540,\"_37\":541,\"_43\":542,\"_45\":543,\"_47\":48},\"turn-your-llm-into-a-poet\",\"Make any large language model a better poet\",\"Prompt engineering and training are often the first solutions we reach for to improve language model behavior, but they're not the only way.\",\"assets/blog/poet/poet.jpg\",[501],\"2023-05-26T00:00:00.000Z\",\"May 26, 2023\",{\"_29\":545,\"_31\":546,\"_33\":547,\"_35\":548,\"_37\":549,\"_43\":550,\"_45\":551,\"_47\":48},\"new-status-page\",\"Status page\",\"We've added a status page to provide real-time updates on the health of Replicate.\",\"assets/blog/status-page/nines.jpg\",[95,40],\"2023-05-18T00:00:00.000Z\",\"May 18, 2023\",{\"_29\":553,\"_31\":554,\"_33\":555,\"_35\":556,\"_37\":557,\"_43\":558,\"_45\":559,\"_47\":48},\"language-model-roundup\",\"Language model roundup, April 2023\",\"A roundup of recent developments from the world of open-source language models.\",\"assets/blog/language-model-roundup/language-model-roundup.jpg\",[501,427,40],\"2023-04-21T00:00:00.000Z\",\"April 21, 2023\",{\"_29\":561,\"_31\":562,\"_33\":563,\"_35\":564,\"_37\":565,\"_43\":566,\"_45\":567,\"_47\":48},\"autocog\",\"AutoCog — Generate Cog configuration with GPT-4\",\"Give it a machine learning directory and AutoCog will create predict.py and cog.yaml until it successfully runs a prediction\",\"assets/blog/autocog/autocog.png\",[105],\"2023-04-19T00:00:00.000Z\",\"April 19, 2023\",{\"_29\":569,\"_31\":570,\"_33\":14,\"_35\":571,\"_37\":572,\"_43\":573,\"_45\":574,\"_47\":48},\"language-models\",\"Language models are on Replicate\",\"assets/blog/language-models/out-2.png\",[42],\"2023-04-05T00:00:00.000Z\",\"April 5, 
2023\",{\"_29\":576,\"_31\":577,\"_33\":14,\"_35\":578,\"_37\":579,\"_43\":580,\"_45\":581,\"_47\":48},\"fine-tune-alpaca-with-lora\",\"How to use Alpaca-LoRA to fine-tune a model like ChatGPT\",\"assets/blog/fine-tune-alpaca-with-lora/alpaca-lora.jpg\",[105,436,40],\"2023-03-23T00:00:00.000Z\",\"March 23, 2023\",{\"_29\":583,\"_31\":584,\"_33\":585,\"_35\":586,\"_37\":587,\"_43\":588,\"_45\":589,\"_47\":48},\"llama-roundup\",\"Week 3 of LLaMA 🦙\",\"A roundup of recent developments from the llamaverse.\",\"assets/blog/llama-roundup/llama-astronaut.jpg\",[40],\"2023-03-18T00:00:00.000Z\",\"March 18, 2023\",{\"_29\":591,\"_31\":592,\"_33\":593,\"_35\":594,\"_37\":595,\"_43\":596,\"_45\":597,\"_47\":48},\"fine-tune-llama-to-speak-like-homer-simpson\",\"Fine-tune LLaMA to speak like Homer Simpson\",\"With a small amount of data and an hour of training you can make LLaMA output text in the voice of the dataset.\",\"assets/blog/homer/robot-homer.webp\",[42],\"2023-03-17T00:00:00.000Z\",\"March 17, 2023\",{\"_29\":599,\"_31\":600,\"_33\":601,\"_35\":602,\"_37\":603,\"_43\":604,\"_45\":605,\"_47\":48},\"replicate-alpaca\",\"Train and run Stanford Alpaca on your own machine\",\"We'll show you how to train Alpaca, a fine-tuned version of LLaMA that can respond to instructions like ChatGPT.\",\"assets/blog/replicate-alpaca/party-alpaca.png\",[40],\"2023-03-16T00:00:00.000Z\",\"March 16, 2023\",{\"_29\":607,\"_31\":608,\"_33\":609,\"_35\":610,\"_37\":611,\"_43\":612,\"_45\":613,\"_47\":48},\"machine-learning-needs-better-tools\",\"Machine learning needs better tools\",\"Lots of people want to build things with machine learning, but they don't have the expertise to use it.\",\"assets/blog/machine-learning-is-just-software/cover.png\",[42],\"2023-02-21T00:00:00.000Z\",\"February 21, 2023\",{\"_29\":615,\"_31\":616,\"_33\":617,\"_35\":618,\"_37\":619,\"_43\":620,\"_45\":621,\"_47\":48},\"lora-faster-fine-tuning-of-stable-diffusion\",\"Introducing LoRA: A faster way to fine-tune 
Stable Diffusion\",\"It's like DreamBooth, but much faster. And you can run it in the cloud on Replicate.\",\"assets/blog/lora-api/lora-api.png\",[492,105,491,40],\"2023-02-07T00:00:00.000Z\",\"February 7, 2023\",{\"_29\":623,\"_31\":624,\"_33\":625,\"_35\":626,\"_37\":627,\"_43\":628,\"_45\":629,\"_47\":48},\"dreambooth-api\",\"Train and deploy a DreamBooth model on Replicate\",\"With just a handful of images and a single API call, you can train a model, publish it to Replicate, and run predictions on it in the cloud.\",\"assets/blog/dreambooth-api/Untitled.png\",[42,40],\"2022-11-21T00:00:00.000Z\",\"November 21, 2022\",{\"_29\":631,\"_31\":632,\"_33\":633,\"_35\":634,\"_37\":635,\"_43\":636,\"_45\":637,\"_47\":48},\"run-stable-diffusion-on-m1-mac\",\"Run Stable Diffusion on your M1 Mac’s GPU\",\"How to run Stable Diffusion locally so you can hack on it\",\"assets/blog/run-stable-diffusion-on-m1-mac/grid-0002.png\",[42],\"2022-08-31T00:00:00.000Z\",\"August 31, 2022\",{\"_29\":639,\"_31\":640,\"_33\":641,\"_35\":642,\"_37\":643,\"_43\":644,\"_45\":645,\"_47\":48},\"run-stable-diffusion-with-an-api\",\"Run Stable Diffusion with an API\",\"How to use Replicate to integrate Stable Diffusion into hacks, apps, and projects\",\"assets/blog/run-stable-diffusion-with-an-api/out-0-3.png\",[40],\"2022-08-29T00:00:00.000Z\",\"August 29, 2022\",{\"_29\":647,\"_31\":648,\"_33\":649,\"_35\":650,\"_37\":651,\"_43\":653,\"_45\":654,\"_47\":48},\"build-a-robot-artist-for-your-discord-server-with-stable-diffusion\",\"Build a robot artist for your Discord server with Stable Diffusion, Replicate, and Fly.io\",\"A tutorial for building a chat bot that replies to prompts with the output of a text-to-image model.\",\"assets/blog/discord-bot/dreaming-of-rabbits.png\",[652,96,42],\"zeke,\",\"2022-08-25T00:00:00.000Z\",\"August 25, 2022\",{\"_29\":656,\"_31\":657,\"_33\":658,\"_35\":-7,\"_37\":659,\"_43\":661,\"_45\":662,\"_47\":48},\"uncanny-spaces\",\"Join us at Uncanny Spaces\",\"We're 
bringing people together to explore what's being created with machine learning.\",[660],\"rossjillian\",\"2022-08-11T00:00:00.000Z\",\"August 11, 2022\",{\"_29\":664,\"_31\":665,\"_33\":666,\"_35\":-7,\"_37\":667,\"_43\":669,\"_45\":670,\"_47\":48},\"grab-hundreds-of-images-with-clip-and-laion\",\"Automating image collection\",\"Using CLIP and LAION5B to collect thousands of captioned images.\",[668],\"afiaka87\",\"2022-08-05T00:00:00.000Z\",\"August 5, 2022\",{\"_29\":672,\"_31\":673,\"_33\":674,\"_35\":-7,\"_37\":675,\"_43\":676,\"_45\":677,\"_47\":48},\"exploring-text-to-image-models\",\"Exploring text to image models\",\"The basics of using the API to create your own images from text.\",[668,660],\"2022-07-18T00:00:00.000Z\",\"July 18, 2022\",{\"_29\":679,\"_31\":680,\"_33\":681,\"_35\":-7,\"_37\":682,\"_43\":683,\"_45\":684,\"_47\":48},\"model-docs\",\"A new template for model READMEs\",\"Inspired by model cards, we've created templates for documenting models on Replicate.\",[660,40],\"2022-07-05T00:00:00.000Z\",\"July 5, 2022\",{\"_29\":686,\"_31\":687,\"_33\":688,\"_35\":-7,\"_37\":689,\"_43\":690,\"_45\":691,\"_47\":48},\"constraining-clipdraw\",\"Constraining CLIPDraw\",\"An introduction to differentiable programming and the process of refining generative art models.\",[96],\"2022-05-27T00:00:00.000Z\",\"May 27, 2022\",{\"_29\":693,\"_31\":694,\"_33\":695,\"_35\":-7,\"_37\":696,\"_43\":697,\"_45\":698,\"_47\":48},\"hello-world\",\"Hello, world!\",\"We're a small team of engineers and machine learning enthusiasts working to make machine learning more accessible.\",[96,40],\"2022-05-16T00:00:00.000Z\",\"May 16, 2022\",\"routes/blog/detail\",{\"_29\":521},\"content/blog/fine-tune-llama-2\",\"actionData\",\"errors\"]\n");</script><!--$?--><template id="B:1"></template><!--/$--></div><script>function $RC(a,b){a=document.getElementById(a);b=document.getElementById(b);b.parentNode.removeChild(b);if(a){a=a.previousSibling;var 
f=a.parentNode,c=a.nextSibling,e=0;do{if(c&&8===c.nodeType){var d=c.data;if("/$"===d)if(0===e)break;else e--;else"$"!==d&&"$?"!==d&&"$!"!==d||e++}d=c.nextSibling;f.removeChild(c);c=d}while(c);for(;b.firstChild;)f.insertBefore(b.firstChild,c);a.data="$";a._reactRetry&&a._reactRetry()}};$RC("B:0","S:0")</script><div hidden id="S:1"><script>window.__reactRouterContext.streamController.close();</script></div><script>$RC("B:1","S:1")</script>

Pages: 1 2 3 4 5 6 7 8 9 10