Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 11 additions & 7 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,8 +1,12 @@
ENVIRONMENT=development
RACK_ENV=production
ENVIRONMENT=production
PORT=4567
OPENAI_ACCESS_TOKEN=your-token
MODEL=gpt-4o
FEED_TITLE=your-title
FEED_LINK=your-feed-channel-link
FEED_DOMAIN=your-domain.com
FEED_DESCRIPTION=awesome-description
FEED_TITLE="Brian: achris RSS Feed"
FEED_LINK=https://achris.me
FEED_DOMAIN=brian.achris.me
FEED_DESCRIPTION=AI-Generated RSS Feeds from books I would like to learn more about
TEXT_MODEL=gemini-2.5-flash
TEXT_PROVIDER=google
AUDIO_MODEL=gemini-2.5-flash-preview-tts
AUDIO_PROVIDER=google
ACCESS_TOKEN=my_access_token
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ FROM ruby:3.4.4-alpine
WORKDIR /app

RUN touch /app/cron.log && chmod 644 /app/cron.log
RUN echo "0 6 * * * cd /app && ruby task.rb 2>&1" | crontab -
RUN echo "0 6 * * * cd /app && ruby tasks/generate_feed.rb 2>&1" | crontab -

COPY --from=builder /usr/local/bundle/ /usr/local/bundle/
COPY . .
Expand Down
7 changes: 5 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,8 +65,11 @@ environment:
FEED_LINK: https://achris.me
FEED_DOMAIN: brian.achris.me
FEED_DESCRIPTION: AI-Generated RSS Feeds from books I would like to learn more about
MODEL: gpt-4o
OPENAI_ACCESS_TOKEN: your-openai-access-token
TEXT_MODEL: gemini-2.5-flash
TEXT_PROVIDER: google
AUDIO_MODEL: gemini-2.5-flash-preview-tts
AUDIO_PROVIDER: google
ACCESS_TOKEN: my_access_token
```

2. Configure your reading list in `history.json` by providing the book title and its author, topics covered, and last updated date.
Expand Down
14 changes: 7 additions & 7 deletions brian.rb
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
require_relative "models/topic"

class Brian
MODEL = ENV["MODEL"]
TEXT_MODEL = ENV["TEXT_MODEL"]

SYSTEM_PROMPT = <<~TEXT
Create a detailed analysis of the topic chosen by the user, including the whys, how and what results.
Expand Down Expand Up @@ -52,7 +52,7 @@ def self.run(entry)
# @return [Topic, nil] Returns a Topic object or nil if an error occurs
#
def run(entry)
return development_topic if development_mode?
# return development_topic if development_mode?

new_topic = choose_new_topic(entry.book, entry.covered_topics)
if new_topic.nil? || new_topic.empty?
Expand All @@ -79,7 +79,7 @@ def run(entry)

private

def client = OPENAI_CLIENT
def client = AI_CLIENT

def development_mode?
ENV["ENVIRONMENT"] == "development"
Expand All @@ -99,7 +99,7 @@ def development_topic
def choose_new_topic(book, covered_topics)
response = client.chat(
parameters: {
model: MODEL,
model: "gemini-2.5-flash",
messages: [
{role: "user", content: pick_topic_prompt(book, covered_topics)}
],
Expand All @@ -115,7 +115,7 @@ def choose_new_topic(book, covered_topics)
def generate_post(book, topic)
client.chat(
parameters: {
model: MODEL,
model: TEXT_MODEL,
messages: [
{role: "system", content: SYSTEM_PROMPT},
{role: "user", content: user_prompt(book, topic)}
Expand All @@ -131,10 +131,10 @@ def generate_post(book, topic)
def generate_audio(text)
client.audio.speech(
parameters: {
model: "tts-1",
model: ENV["AUDIO_MODEL"],
input: text,
instructions: AUDIO_PROMPT,
voice: "shimmer",
voice: "charon",
response_format: "mp3",
speed: 1.0
}
Expand Down
46 changes: 36 additions & 10 deletions client.rb
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,39 @@

require "openai"

OPENAI_CLIENT = OpenAI::Client.new(
access_token: ENV["OPENAI_ACCESS_TOKEN"],
log_errors: true
)

# Available but not used right now
DEEPSEEK_CLIENT = OpenAI::Client.new(
access_token: ENV["DEEP_SEEK_ACCESS_TOKEN"],
uri_base: "https://api.deepseek.com/"
)
# Facade over two OpenAI-compatible API clients — one for text chat, one
# for audio/TTS — so callers use a single object (AI_CLIENT) regardless of
# which provider backs each capability.
#
# Configuration (environment):
#   ACCESS_TOKEN           — API token shared by both clients
#   TEXT_PROVIDER          — "openai" | "google" | "deepseek" | anything else
#   AUDIO_PROVIDER         — same values as TEXT_PROVIDER
#   CUSTOM_TEXT_PROVIDER   — base URI used when TEXT_PROVIDER is unrecognized
#   CUSTOM_AUDIO_PROVIDER  — base URI used when AUDIO_PROVIDER is unrecognized
class AiClient
  def initialize
    @text_client = build_client(ENV["TEXT_PROVIDER"], "CUSTOM_TEXT_PROVIDER")
    @audio_client = build_client(ENV["AUDIO_PROVIDER"], "CUSTOM_AUDIO_PROVIDER")
  end

  # Forwards chat-completion calls to the text client.
  def chat(**) = @text_client.chat(**)

  # Returns the audio interface of the audio client; callers chain `.speech`.
  def audio(**) = @audio_client.audio(**)

  private

  # Builds one OpenAI-compatible client for the given provider.
  #
  # @param provider [String, nil] value of TEXT_PROVIDER / AUDIO_PROVIDER
  # @param custom_env [String] env var consulted when provider is unrecognized
  # @return [OpenAI::Client]
  def build_client(provider, custom_env)
    OpenAI::Client.new(
      access_token: ENV["ACCESS_TOKEN"],
      uri_base: uri_base_for(provider, custom_env),
      log_errors: true
    )
  end

  # Resolves the base URI for a provider. A nil return selects the openai
  # gem's default endpoint (api.openai.com).
  #
  # @param provider [String, nil]
  # @param custom_env [String]
  # @return [String, nil]
  def uri_base_for(provider, custom_env)
    case provider
    when "openai" then nil
    when "google" then "https://generativelanguage.googleapis.com/v1beta/openai/"
    when "deepseek" then "https://api.deepseek.com/"
    else ENV[custom_env]
    end
  end
end

AI_CLIENT = AiClient.new
91 changes: 91 additions & 0 deletions spec/client_spec.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
# frozen_string_literal: true

require "spec_helper"
require_relative "../client"

RSpec.describe AiClient do
  let(:text_uri_base) { "https://text.example.com/" }
  let(:audio_uri_base) { "https://audio.example.com/" }
  let(:access_token) { "test_token" }

  before do
    # Stub the ENV keys client.rb actually reads (TEXT_PROVIDER /
    # AUDIO_PROVIDER — not AI_TEXT_PROVIDER / AI_AUDIO_PROVIDER, which
    # left the real keys unstubbed and never exercised the custom branch).
    # An unrecognized provider value falls through to the CUSTOM_* env vars.
    allow(ENV).to receive(:[]).with("TEXT_PROVIDER").and_return("custom")
    allow(ENV).to receive(:[]).with("CUSTOM_TEXT_PROVIDER").and_return(text_uri_base)
    allow(ENV).to receive(:[]).with("AUDIO_PROVIDER").and_return("custom")
    allow(ENV).to receive(:[]).with("CUSTOM_AUDIO_PROVIDER").and_return(audio_uri_base)
    allow(ENV).to receive(:[]).with("ACCESS_TOKEN").and_return(access_token)

    @audio_double = instance_double(OpenAI::Client)
    @audio_speech_double = instance_double(OpenAI::Audio)

    allow(@audio_double).to receive(:audio).and_return(@audio_speech_double)
    allow(@audio_speech_double).to receive(:speech).and_return(true)

    # Constructor is called twice per AiClient.new: text client first,
    # then audio client — return values must match that order.
    @text_client_double = instance_double(OpenAI::Client)

    allow(OpenAI::Client).to receive(:new).and_return(@text_client_double, @audio_double)
  end

  describe "#initialize" do
    it "creates text client with correct parameters" do
      AiClient.new

      expect(OpenAI::Client).to have_received(:new).with(
        access_token: access_token,
        uri_base: text_uri_base,
        log_errors: true
      )
    end

    it "creates audio client with correct parameters" do
      AiClient.new

      expect(OpenAI::Client).to have_received(:new).with(
        access_token: access_token,
        uri_base: audio_uri_base,
        log_errors: true
      )
    end
  end

  describe "#chat" do
    it "forwards call to text client" do
      client = AiClient.new
      test_args = {
        parameters: {
          model: "test",
          messages: [
            {role: "system", content: "a"},
            {role: "user", content: "b"}
          ],
          temperature: 1
        }
      }

      # AiClient#chat accepts keywords only (`def chat(**)`); a positional
      # Hash raises ArgumentError under Ruby 3 keyword separation, so the
      # hash must be splatted at the call site.
      expect(@text_client_double).to receive(:chat).with(**test_args)
      client.chat(**test_args)
    end
  end

  describe "#audio" do
    it "allows chained call to speech" do
      client = AiClient.new
      test_args = {
        parameters: {
          model: "tts-1",
          input: "test",
          instructions: "test",
          voice: "shimmer",
          response_format: "mp3",
          speed: 1.0
        }
      }

      # speech takes keyword arguments as well — splat for the same reason.
      expect(@audio_double).to receive(:audio)
      expect(@audio_speech_double).to receive(:speech).with(**test_args)
      client.audio.speech(**test_args)
    end
  end
end
2 changes: 1 addition & 1 deletion task.rb → tasks/generate_feed.rb
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
#

require "dotenv/load"
require_relative "feeder"
require_relative "../feeder"
require "debug" if ENV["ENVIRONMENT"] == "development"

Feeder.generate_feed
Expand Down