Compare commits
5 Commits
master ... a_reply_to

| Author | SHA1 | Date |
|---|---|---|
| | cc55967926 | |
| | 2d44aed5ac | |
| | 57f8e03984 | |
| | 5763495fea | |
| | 59c7f9cb9e | |
@@ -1,8 +1,5 @@
name: Build and Push Image
on:
  push:
    branches:
      - master
on: [ push ]

jobs:
  build:
@@ -12,61 +9,34 @@ jobs:
    if: gitea.ref == 'refs/heads/master'

    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Checkout
        uses: actions/checkout@v4

      - name: Create Kubeconfig
        run: |
          mkdir $HOME/.kube
          echo "${{ secrets.KUBEC_CONFIG_BUILDX_NEW }}" > $HOME/.kube/config
      - name: Create Kubeconfig
        run: |
          mkdir $HOME/.kube
          echo "${{ secrets.KUBEC_CONFIG_BUILDX }}" > $HOME/.kube/config

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver: kubernetes
          driver-opts: |
            namespace=gitea-runner
            qemu.install=true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver: kubernetes
          driver-opts: |
            namespace=gitea-runner
            qemu.install=true

      - name: Login to Docker Registry
        uses: docker/login-action@v3
        with:
          registry: git.aridgwayweb.com
          username: armistace
          password: ${{ secrets.REG_PASSWORD }}
      - name: Login to Docker Registry
        uses: docker/login-action@v3
        with:
          registry: git.aridgwayweb.com
          username: armistace
          password: ${{ secrets.REG_PASSWORD }}

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            git.aridgwayweb.com/armistace/blog:latest

      - name: Trivy Scan
        run: |
          echo "Installing Trivy "
          sudo apt-get update
          sudo apt-get install -y wget apt-transport-https gnupg lsb-release
          wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo apt-key add -
          echo deb https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main | sudo tee -a /etc/apt/sources.list.d/trivy.list
          sudo apt-get update
          sudo apt-get install -y trivy
          trivy image --format table --exit-code 1 --ignore-unfixed --vuln-type os,library --severity HIGH,CRITICAL git.aridgwayweb.com/armistace/blog:latest

      - name: Deploy
        run: |
          echo "Installing Kubectl"
          apt-get update
          apt-get install -y apt-transport-https ca-certificates curl gnupg
          curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
          chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg
          echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list
          chmod 644 /etc/apt/sources.list.d/kubernetes.list
          apt-get update
          apt-get install kubectl
          kubectl delete namespace blog
          kubectl create namespace blog
          kubectl create secret docker-registry regcred --docker-server=${{ vars.DOCKER_SERVER }} --docker-username=${{ vars.DOCKER_USERNAME }} --docker-password='${{ secrets.DOCKER_PASSWORD }}' --docker-email=${{ vars.DOCKER_EMAIL }} --namespace=blog
          kubectl apply -f kube/blog_pod.yaml && kubectl apply -f kube/blog_deployment.yaml && kubectl apply -f kube/blog_service.yaml
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            git.aridgwayweb.com/armistace/blog:latest
@@ -1,22 +0,0 @@
[core]
	repositoryformatversion = 0
	filemode = true
	bare = false
	logallrefupdates = true
[remote "origin"]
	url = gitea@192.168.178.155:armistace/blog.git
	fetch = +refs/heads/*:refs/remotes/origin/*
[branch "master"]
	remote = origin
	merge = refs/heads/master
[branch "kube_deployment"]
	remote = origin
	merge = refs/heads/kube_deployment
[branch "when_to_use_ai"]
	remote = origin
	merge = refs/heads/when_to_use_ai
[pull]
	rebase = false
[branch "an_actual_solution_to_the_social_media_ban"]
	remote = origin
	merge = refs/heads/an_actual_solution_to_the_social_media_ban
@@ -1,24 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: blog-deployment
  labels:
    app: blog
  namespace: blog
spec:
  replicas: 3
  selector:
    matchLabels:
      app: blog
  template:
    metadata:
      labels:
        app: blog
    spec:
      containers:
      - name: blog
        image: git.aridgwayweb.com/armistace/blog:latest
        ports:
        - containerPort: 8000
      imagePullSecrets:
      - name: regcred
@@ -1,13 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: blog
  namespace: blog
spec:
  containers:
  - name: blog
    image: git.aridgwayweb.com/armistace/blog:latest
    ports:
    - containerPort: 8000
  imagePullSecrets:
  - name: regcred
@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: blog-service
  namespace: blog
spec:
  type: NodePort
  selector:
    app: blog
  ports:
  - port: 80
    targetPort: 8000
    nodePort: 30009
77 src/content/a_reply_to_accelerating_australias_ai_agenda.md Normal file
@@ -0,0 +1,77 @@
Okay, here's a markdown-formatted response based on your prompt, aiming for a conversational and thoughtful tone, incorporating your points and expanding on them. I've tried to maintain the "conversational" feel you requested.

---

## Australia & AI: Let's Do This Right (Without Messing Things Up)

Right, so we're all hearing the buzz about AI. The government wants us to be a tech powerhouse, boosting productivity and all that. But let's be honest, there's a lot that could go wrong. We need to approach this strategically, thoughtfully, and with a healthy dose of Aussie pragmatism.

**1. Productivity vs. People: The Big Balancing Act**

The promise of AI is huge – automating tasks, creating new industries, generally making things *better*. But what about the people whose jobs might be affected? We can't just wave a magic wand and say "AI will create new jobs!" We need concrete plans. That means:

* **Upskilling & Reskilling:** Massive investment in training programs. Not just basic computer literacy, but specialized skills for emerging AI-related roles.
* **Safety Nets:** Stronger social safety nets for those displaced by automation. Universal Basic Income? Expanded unemployment benefits? These are conversations we *need* to be having.
* **Focus on Augmentation, Not Just Automation:** Let's explore how AI can *assist* workers, making them more efficient and productive, rather than simply replacing them.

**2. Policy Priorities: Data Centers & Brainpower**

To be a serious player in AI, we need the infrastructure. That means:

* **Data Centers, Here We Come:** Building local data centers isn't just about jobs; it's about data sovereignty and reducing reliance on overseas providers. Let's incentivize this.
* **Attracting the Best & Brightest:** The US is facing some challenges in higher education, which presents an opportunity. We need to make Australia a magnet for AI talent. That means streamlined visa processes, attractive tax incentives, and a welcoming culture.
* **Beyond the Hype: Funding Research:** We need to support fundamental AI research, not just chasing the latest trends. Long-term investment is key.

**3. Public Sector AI: Lessons Learned**

Government can be a powerful catalyst for AI adoption, but we're not exactly known for flawless digital transformations. Let's avoid repeating past mistakes:

* **Open Data, Open Minds:** Data needs to be accessible in machine-readable formats. No more PDFs!
* **Focus on User Needs:** AI solutions need to be designed with the end-user in mind – citizens, healthcare professionals, emergency responders.
* **Agile Development:** Let's embrace agile development methodologies, allowing for iterative improvements and rapid prototyping.

**4. Skills for the Future: Beyond the PhD**

AI isn't just for PhDs and data scientists. We need a broader range of skills:

* **Apprenticeships & Vocational Training:** Let's invest in practical, hands-on training programs.
* **"AI Literacy" for Everyone:** Basic understanding of AI concepts should be part of the curriculum at all levels of education.
* **The Human Element:** Don't forget the importance of soft skills – creativity, critical thinking, communication.

**5. Tax & Incentives: Leveling the Playing Field**

The current tax system isn't exactly conducive to AI innovation. We need to:

* **R&D Tax Credits:** Generous tax credits for companies investing in AI research.
* **Small Business Support:** Grants and mentorship programs for startups.
* **Re-evaluating Corporate Transparency:** Holding large corporations accountable for their tax contributions.

**6. Security & Ethics: Building Trust**

AI is powerful, but it also poses risks. We need to:

* **Data Privacy Laws:** Robust data privacy laws to protect citizens' information.
* **Algorithmic Transparency:** Making AI algorithms more transparent and explainable.
* **Ethical Guidelines:** Developing ethical guidelines for AI development and deployment.

**7. Copyright & Data Access: Fueling Innovation**

Current copyright laws can be a significant barrier to AI innovation. We need to:

* **Fair Use Reform:** Re-evaluating fair use principles to allow for greater data access for AI training.
* **Open Data Initiatives:** Promoting open data initiatives to make more data available for AI development.

**8. Avoiding Bureaucracy: Let's Keep it Lean**

We don't want to create a new layer of bureaucracy that stifles innovation. Let's:

* **Empower the Private Sector:** Let the private sector lead the way, with government providing support and guidance.
* **Focus on Outcomes:** Measure success based on outcomes, not just activity.


**The Bottom Line:** Australia has the potential to be a leader in AI. But it requires a strategic, thoughtful, and collaborative approach. Let's focus on creating a future where AI benefits everyone, not just a select few. And let's do it with a bit of that classic Aussie ingenuity and a whole lot of common sense.

---

**Note:** I'm ready for feedback and further refinement! Let me know what you think.
@@ -1,52 +0,0 @@
Title: An Actual Solution to the Social Media Ban
Date: 2025-09-16 20:00
Modified: 2025-09-17 20:00
Category: Politics
Tags: politics, social media, tech policy
Slug: actual-social-media-solution
Authors: Andrew Ridgway
Summary: The Social Media ban is an abject failure of policy. I propose an actual technical solution that addresses the issues raised by the legislation and also ensures user privacy and data security through an opt in solution.

## The Toothless Legislation

The Australian Government recently announced it would be “watering down” the requirements of the upcoming legislation regarding online safety. The irony isn’t lost on anyone observing the situation. Specifically, the planned mandatory minimum “flag rate” for underage detection technology has been dropped – a clear indication that initial testing proved unachievable. Furthermore, the legislation now only requires tech companies to demonstrate “reasonable steps” to remove children from their platforms.

Let’s be frank: this legislation, as it stands, achieves very little. Experts in the field consistently warned that the proposed age verification approach was flawed and ignored industry input. The result? Parents are arguably in a worse position than before. The focus on punitive measures, rather than practical solutions, has been a misstep, and the relentless pursuit of this agenda by the eSafety Commissioner feels increasingly disconnected from reality.

It’s important to state that criticism of this legislation isn’t an endorsement of big tech; in fact, I’m actively working to reduce my own reliance on these platforms. It is about the Australian Government overreaching in an area where it lacks the necessary expertise and, frankly, the authority. The driving force behind this appears to be a personal vendetta, fuelled by someone unfamiliar with the fundamental principles of how the internet operates.

So, with the current legislation effectively neutered, what *can* the government do to genuinely help parents navigate the challenges of online safety? I believe there’s a technically feasible solution that doesn’t involve trampling on privacy or creating massive security vulnerabilities.

The answer lies in a system we’ve been using for decades: the Domain Name System (DNS). Simply put, DNS translates human-readable URLs like [https://blog.aridgwayweb.com](https://blog.aridgwayweb.com) into the corresponding IP address (e.g., x.x.x.x). It’s a foundational component of the internet, and while seemingly simple, it’s incredibly powerful.

## What is DNS?

Most people rely on the DNS provided by their Internet Service Provider (ISP) or the manufacturer of their router. However, it’s possible to change this setting. Popular alternatives include Cloudflare’s 1.1.1.1, Google’s 8.8.8.8, and paid family-friendly options like OpenDNS. For those with more technical expertise, it’s even possible to run your own DNS server – I personally use Pi-hole to block ads at the network level.

This existing infrastructure offers a unique opportunity. The Chinese government has long leveraged DNS as part of its “Great Firewall,” demonstrating its capability for large-scale internet censorship and control. While that application raises obvious concerns, the underlying technology itself isn’t inherently malicious and is a good fit for the purposes of *opt in* age verification.

<img alt="Current DNS" height="auto" width="100%" src="{attach}/images/dns_currently.png">

## How can we leverage DNS for age verification?

My proposal is straightforward: the Australian Government could establish a large-scale DNS server within the Communications Department. This server could be configured to redirect requests to specific websites – like Facebook or TikTok – to an internal service that requires some form of authentication or identity verification. Once verified, the request would then be forwarded to the correct IP address.

<img alt="Optional Government DNS" height="auto" width="100%" src="{attach}/images/optional_gov_dns.png">

This DNS server could be *optionally* configured on any router, with ISPs assisting less technically inclined customers. The result? Access to certain websites from that router would require passing through the government’s age verification process.
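
To make the redirect mechanism concrete, here is a minimal Python sketch of the resolver-side decision an opted-in DNS server would make. The domain list, gateway address, and function names are purely illustrative assumptions, not a description of any existing government system:

```python
import socket

# Hypothetical list of gated domains and the address of the government-run
# verification gateway (203.0.113.10 is a documentation-only IP).
RESTRICTED_DOMAINS = {"facebook.com", "www.facebook.com", "tiktok.com", "www.tiktok.com"}
VERIFICATION_GATEWAY_IP = "203.0.113.10"

def resolve(domain: str) -> str:
    """Return the address an opted-in DNS server would hand back for `domain`."""
    if domain.lower().rstrip(".") in RESTRICTED_DOMAINS:
        # Restricted sites resolve to the verification service instead.
        return VERIFICATION_GATEWAY_IP
    # Everything else falls through to normal resolution.
    return socket.gethostbyname(domain)

if __name__ == "__main__":
    print(resolve("facebook.com"))          # -> 203.0.113.10 (verification gateway)
    print(resolve("blog.aridgwayweb.com"))  # -> the site's real IP address
```

The point of the sketch is simply that the decision lives entirely at the resolver: nothing about the user's browsing needs to leave the country or touch the platforms themselves.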

The authentication could be managed by an adult in the household, providing a valid identity document to receive some form of auth mechanism (password? passkey? authenticator?) to allow the user to continue to their 'restricted' website.

Mobile phone manufacturers could also update their devices’ internal DNS settings to incorporate this option.

This would allow for the creation of “Government-certified” or “Family-Friendly” devices – routers or phones pre-configured with this DNS server – ensuring a consistent level of online safety as defined by the Australian Government. These devices could be subsidised by the government to ensure accessibility for all families.

Crucially, this system is optional. Individuals who prefer to manage their own online security – as I do – would remain unaffected. However, for parents who lack the technical skills or desire to implement their own solutions, this offers a practical and effective way to manage their child’s online safety.

This approach also avoids the need to collect and store sensitive identity data offshore. No tech company needs to be involved in the verification process, and the skills to build and maintain this system already exist within the Australian public service.

Furthermore, the eSafety Commissioner could easily update the list of websites subject to verification, providing a flexible and responsive system. It wouldn’t cover the entire internet, of course, but it would provide a valuable safety net for those who need it.

## Where to from here?

Now that the government has acknowledged the shortcomings of its initial approach, it’s time to explore real solutions. A government-run, family-friendly DNS system that routes certain domain names to a verification process is a solid starting point for a genuinely effective technical solution to help families navigate the online world.
@@ -1,41 +0,0 @@
Title: Apple And The Anti-Dev Platform
Date: 2025-08-28 20:00
Modified: 2025-08-28 20:00
Category: Tech, Software, Apple
Tags: Tech, Software, Apple
Slug: apple-anti-dev
Authors: Andrew Ridgway
Summary: Apple's requirements for developers are onerous. I detail some of the frustrations I've had whilst dealing with the platform to deploy a small app as part of my day job.

## Introduction: Why I Hate Loving to Hate Apple

This week, I found myself in the unenviable position of using MacOS for work. It was like revisiting an old flame only to realize they’ve become *that* person—still attractive from afar, but toxic up close. Let me clarify: I’m not anti-Apple per se. I appreciate their design aesthetic as much as anyone. But when you’re a developer, especially one with a penchant for Linux and a deep love for open-source, Apple’s ecosystem feels like walking into a store where the sign says "Employee Discounts" but they charge you double for the privilege.

## 1. The Hardware-Software Tie-In: Why Buy New Every Year?

Let’s talk about my borrowed MacBook from 2020. It was a kind gesture, right? But here’s the kicker: this machine, which was cutting-edge just five years ago, is now deemed too old to run the latest MacOS. I needed Xcode for a project, and guess what? You can’t run the latest version of Xcode without the latest MacOS. So, to paraphrase: "Sorry, but your device isn’t *new enough* to develop on the Apple platform anymore." This isn’t just inconvenient; it’s a deliberate strategy to force upgrades. It’s like buying a car that requires you to upgrade your entire garage every year just to keep it running.

## 2. Forced Obsolescence: The New "Upgrade" Cycle

Yes, Microsoft did the whole TPM 2.0 thing with Windows 11. But Apple takes it to another level. They’ve turned hardware into a subscription model without you even realizing it. You buy a device, and within a few years, it’s obsolete for their latest software and tools. This isn’t about security or innovation—it’s about control. Why release an operating system that only works on devices sold in the last 12 months? It creates a false market for "new" hardware, padding Apple’s margins at the expense of developers and users.

## 3. High Costs: The Developer Fee That Keeps On Giving

I honestly believe this all boils down to money. To develop on Apple’s platform, you need an Apple Developer account. This costs $150 AUD a year. Now, if I were to buy a new MacBook Pro today, that would set me back around $2,500 AUD. And for what? The privilege of being able to build apps on my own device? It’s like paying a toll every year just to use the road you already own. It’s enough to make you consider a career change and become a sheep farmer.

## 4. Lack of Freedom: Who Owns the Device Anyway?

Here’s where it gets really egregious: Apple’s developer review process. It’s like being subjected to a TSA pat-down every time you want to build something, even if it's just for your own device. To deploy ANYTHING onto an iOS device I need to hand my Government issued license over to Apple and let them "check I'm a real person". And no, this isn't just for App Store deployments, which I can understand. This is for any deployment; it's the only way to get a certificate to cross sign on the app and device... Google might be heading down a similar path, but at least you'll still be able to deploy to custom Android ROMs. On Apple, it feels like every step is designed to remind you that you’re dancing in their sandbox—and they call the shots. If you use iOS you have to dance to their tune AT ALL TIMES.

## 5. The "Apple Tax": A Future Job Requirement

I think all developers and consultants should demand an "Apple Tax." It will be simple:

* $5,000 AUD for new Apple hardware.
* An additional 25% markup on development hours spent navigating Apple’s ecosystem.

Why? Because it's time developers passed on these costs to the users. It's time to make this hurt the consumers who insist on using these products with predatory business models for developers. Yes, developers go where the market is, but it's time to start charging that market so it understands the true cost of being there.

## Conclusion: Why I’ll Keep Hating Loving to Hate Apple

Apple’s ecosystem feels like a love story gone wrong—a relationship where one party keeps raising the stakes just to remind you of how much they control everything. Developers are supposed to be the disruptors, the rebels who challenge the status quo. But when your tools are designed to keep you tethered to a specific platform and its outdated business model, it feels less like innovation and more like indentured servitude. If you’re still enamored with Apple’s ecosystem and think it’s “just part of the game,” I urge you to take a long, hard look in the mirror. Because if this is your idea of progress, we’re all in trouble.
@@ -1,188 +0,0 @@
Title: Designing and Building an AI Enhanced CCTV System
Date: 2026-02-02 20:00
Modified: 2026-02-03 20:00
Category: Homelab
Tags: proxmox, hardware, self host, homelab
Slug: ai-enhanced-cctv
Authors: Andrew Ridgway
Summary: Home CCTV Security has become a bastion of cloud subscription awfulness. This blog describes the work involved in creating your own home grown AI enhanced CCTV system. Unfortunately, what you save in subscription you lose in time, but if you value privacy, it's worth it.

### Why Build Your Own AI‑Enhanced CCTV?

When you buy a consumer‑grade security camera, you’re not just paying for the lens and the plastic housing. You’re also paying for a subscription that ships every frame of your backyard to a cloud service you’ll never meet. That data can be used to train models, sold to advertisers, or handed over to authorities on a whim. For many, the convenience outweighs the privacy cost, but for anyone who values control over their own footage, the trade‑off feels unacceptable.

The goal of this project was simple: **keep every byte of video on‑premises, add a layer of artificial intelligence that makes the footage searchable and actionable, and do it all on a budget that wouldn’t break the bank**. Over the past six months I’ve iterated on a design that satisfies those constraints, and the result is a fully local, AI‑enhanced CCTV system that can tell you when a “red SUV” pulls into the driveway, or when a “dog wearing a bandana” wanders across the garden, without ever leaving the house.

---

### The Core Software – Frigate

At the heart of the system sits **Frigate**, an open‑source network video recorder (NVR) that runs in containers and is configured entirely via a single YAML file. The simplicity of the configuration is a breath of fresh air compared with the sprawling JSON or proprietary GUIs of many commercial solutions. A few key reasons Frigate became the obvious choice:

| Feature | Why It Matters |
|---------|----------------|
| **Container‑native** | Deploys cleanly on Docker, Kubernetes, or a lightweight LXC. No host‑level dependencies to wrestle with. |
| **YAML‑driven** | Human‑readable, version‑controlled, and easy to replicate across test environments. |
| **Built‑in object detection** | Supports car, person, animal, and motorbike detection out of the box, with the ability to plug in custom models. |
| **Extensible APIs** | Exposes detection events, snapshots, and stream metadata for downstream automation tools. |
| **GenAI integration** | Recent addition that lets you forward snapshots to a local LLM (via Ollama) for semantic enrichment. |

The documentation is thorough, and the community is active enough that most stumbling blocks are resolved within a few forum posts. Because the entire system is defined in a single YAML file, I can spin up a fresh test instance in minutes, tweak a camera’s FFmpeg options, and see the impact without rebuilding the whole stack.

---

### Choosing the Cameras – TP‑Link Vigi C540

A surveillance system is only as good as the lenses feeding it. I needed cameras that could:

1. Deliver a reliable RTSP stream (the lingua franca of NVRs).
2. Offer pan‑and‑tilt so a single unit can cover a larger field of view.
3. Provide on‑board human detection to reduce unnecessary bandwidth.
4. Remain affordable enough to allow for future expansion.

The **TP‑Link Vigi C540** checked all those boxes. Purchased during a Black Friday sale for roughly AUD 50 each, the three units I started with have proven surprisingly capable:

- **Pan/Tilt** – Allows a single camera to sweep a driveway or front porch, reducing the number of physical devices needed.
- **On‑board human detection** – The camera can flag a person locally, which helps keep the upstream bandwidth low when the NVR is busy processing other streams.
- **RTSP output** – Perfectly compatible with Frigate’s ingest pipeline.
- **No zoom** – A minor limitation, but the field of view is wide enough for my modest property.

The cameras are wired via Ethernet, a decision driven by reliability concerns. Wireless links are prone to interference, especially when the cameras are placed near metal roofs or dense foliage. Running Ethernet required a bit of roof work (more on that later), but the resulting stable connection has paid dividends in stream consistency.

---

### The Host Machine – A Budget Dell Workstation

All the AI magic lives on a modest **Dell OptiPlex 7050 SFF** that I rescued for $150. Its specifications are:

- **CPU:** Intel i5‑7500 (4 cores, 3.4 GHz)
- **RAM:** 16 GB DDR4
- **Storage:** 256 GB SSD for the OS and containers, 2 TB HDD for video archives
- **GPU:** Integrated Intel HD Graphics 630 (no dedicated accelerator)

Despite lacking a powerful discrete GPU, the workstation runs Frigate’s **OpenVINO**‑based SSD‑Lite MobileNet V2 detector comfortably. The model is small enough to execute on the integrated graphics, keeping inference latency low enough for real‑time alerts. CPU utilization hovers around 70‑80 % under typical load, which is high but acceptable for a home lab. The system does run warm, so I’ve added a couple of case fans to keep temperatures in the safe zone.

The storage layout is intentional: the SSD hosts the OS, Docker engine, and Frigate container, ensuring fast boot and container start times. The 2 TB HDD stores raw video, detection clips, and alert snapshots. With the current retention policy (7 days of full footage, 14 days of detection clips, 30 days of alerts) the drive is comfortably sized, though I plan to monitor usage as I add more cameras.

---

### Wiring It All Together – Proxmox and Docker LXC

To keep the environment tidy and reproducible, I run the entire stack inside a **Proxmox VE** cluster. A dedicated node hosts a **Docker‑enabled LXC container** that isolates the NVR from the rest of the homelab. This approach offers several benefits:

- **Resource isolation** – CPU and memory limits can be applied per container, preventing a runaway process from starving other services.
- **Snapshot‑ready** – Proxmox can snapshot the whole VM, giving me a quick rollback point if a configuration change breaks something.
- **Portability** – The LXC definition can be exported and re‑imported on any other Proxmox host, making disaster recovery straightforward.

Inside the container, Docker orchestrates the Frigate service, an Ollama server (hosting the LLM models), and a lightweight reverse proxy for HTTPS termination. All traffic stays within the local network; the only external connections are occasional model downloads from Hugging Face and the occasional software update.

---

### From Detection to Context – The Ollama Integration

Frigate’s native object detection tells you *what* it sees (e.g., “person”, “car”, “dog”). To turn that into *meaningful* information, I added a **GenAI** layer using **Ollama**, a self‑hosted LLM runtime that can serve vision‑capable models locally.

The workflow is as follows:

1. **Frigate detects an object** and captures a snapshot of the frame.
2. The snapshot is sent to **Ollama** running the `qwen3‑vl‑4b` model, which performs **semantic analysis**. The model returns a textual description such as “a white ute with a surfboard on the roof”.
3. Frigate stores this enriched metadata alongside the detection event.
4. When a user searches the Frigate UI for “white ute”, the system can match the description generated by the LLM, dramatically narrowing the result set.
5. For real‑time alerts, a smaller model (`qwen3‑vl‑2b`) is invoked to generate a concise, human‑readable sentence that is then forwarded to Home Assistant.

Because the LLM runs locally, there is no latency penalty associated with round‑trip internet calls, and privacy is preserved. The only external dependency is the occasional model pull from Hugging Face during the initial setup or when a newer version is released.
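
For illustration, a minimal Python sketch of step 2 might look like the following. It uses Ollama's standard REST endpoint; the model tag, prompt wording, and snapshot path are assumptions for the example rather than the exact values in my Frigate configuration:

```python
import base64
import json
import urllib.request

OLLAMA_URL = "http://localhost:11434/api/generate"  # default local Ollama endpoint

def describe_snapshot(image_path: str, model: str = "qwen3-vl:4b") -> str:
    """Ask a local vision model for a one-sentence description of a snapshot."""
    with open(image_path, "rb") as fh:
        image_b64 = base64.b64encode(fh.read()).decode("ascii")

    payload = {
        "model": model,
        # A multi-step prompt produces richer metadata than "describe the scene".
        "prompt": (
            "List the objects in this CCTV snapshot, their colours, and any "
            "actions taking place. Answer in one short sentence."
        ),
        "images": [image_b64],
        "stream": False,
    }
    request = urllib.request.Request(
        OLLAMA_URL,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request) as response:
        # Ollama returns the generated text in the "response" field.
        return json.loads(response.read())["response"].strip()

if __name__ == "__main__":
    print(describe_snapshot("/tmp/driveway_snapshot.jpg"))
```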

---

### Home Assistant – The Glue That Binds

While Frigate handles video ingestion and object detection, **Home Assistant** provides the automation backbone. By integrating Frigate’s webhook events into Home Assistant, I can:

- **Trigger notifications** via Matrix when a detection meets certain criteria.
- **Run conditional logic** to decide whether an alert is worth sending (e.g., ignore cars on the street but flag a delivery van stopping at the gate).
- **Log events** into a time‑series database for later analysis.
- **Expose the enriched metadata** to any other smart‑home component that might benefit from it (e.g., turning on porch lights when a person is detected after dark).

The Home Assistant configuration lives in its own YAML file, mirroring the philosophy of “infrastructure as code”. This makes it easy to version‑control the automation logic alongside the NVR configuration.

---

### Semantic Search – Finding a Needle in a Haystack

One of the most satisfying features of the system is the ability to **search footage using natural language**. Traditional NVRs only let you filter by timestamps or simple motion events. With the GenAI‑enhanced metadata, the search bar becomes a powerful query engine:

- Typing “red SUV” returns all clips where the LLM described a vehicle as red and an SUV.
- Searching “dog with a bandana” surfaces the few moments a neighbour’s pet decided to wear a fashion accessory.
- Combining terms (“white ute with surfboard”) narrows the results to a single delivery that happened last weekend.

Under the hood, the search is a straightforward text match against the stored descriptions, but the quality of those descriptions hinges on the LLM prompts. Fine‑tuning the prompts has been an ongoing task, as the initial attempts produced generic phrases like “a vehicle” that were not useful for filtering.
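
A sketch of that text match, assuming each stored event already carries its LLM-generated description (the event structure here is invented purely for illustration):

```python
def search_events(events: list[dict], query: str) -> list[dict]:
    """Return detection events whose stored description contains every query term."""
    terms = query.lower().split()
    return [
        event for event in events
        if all(term in event["description"].lower() for term in terms)
    ]

# Example: three stored events, one of which matches "white ute".
events = [
    {"camera": "driveway", "description": "A white ute with a surfboard on the roof"},
    {"camera": "garden", "description": "A dog wearing a red bandana"},
    {"camera": "porch", "description": "A delivery driver holding a parcel"},
]
print(search_events(events, "white ute"))  # -> the driveway event only
```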

---

### Managing Storage and Retention

Video data is notoriously storage‑hungry. To keep the system sustainable, I adopted a tiered retention policy:

| Data Type | Retention | Approx. Size (4 cameras) |
|------------|-----------|--------------------------|
| Full video (raw RTSP) | 7 days | ~1.2 TB |
| Detection clips (30 s each) | 14 days | ~300 GB |
| Alert snapshots (high‑res) | 30 days | ~150 GB |

The SSD holds the operating system and container images, while the HDD stores the bulk of the video. When the HDD approaches capacity, a simple cron job rotates out the oldest files, ensuring the system never runs out of space. In practice, the 2 TB drive has been more than sufficient for the current camera count, but I have a spare 4 TB drive on standby for future expansion.
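
The rotation job itself can be very small. The sketch below assumes the archive lives under /media/frigate/recordings and uses a seven-day cutoff; both the path and the window are illustrative rather than my exact setup:

```python
import time
from pathlib import Path

RECORDINGS_DIR = Path("/media/frigate/recordings")  # assumed archive location
RETENTION_DAYS = 7                                   # assumed retention window

def rotate_old_footage() -> None:
    """Delete recording files older than the retention window."""
    cutoff = time.time() - RETENTION_DAYS * 86400
    for clip in RECORDINGS_DIR.rglob("*.mp4"):
        if clip.stat().st_mtime < cutoff:
            clip.unlink()

if __name__ == "__main__":
    rotate_old_footage()
```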

---

### Lessons Learned – The Good, the Bad, and the Ugly

#### 1. **Performance Is a Balancing Act**
Running inference on an integrated GPU is feasible, but the CPU load remains high. Adding a modest NVIDIA GTX 1650 would drop CPU usage dramatically and free headroom for additional cameras or more complex models.

#### 2. **Prompt Engineering Is Real Work**
The LLM’s output quality is directly tied to the prompt. Early attempts used a single sentence like “Describe the scene,” which resulted in vague answers. Iterating on a multi‑step prompt that asks the model to list objects, colors, and actions has produced far richer metadata.

#### 3. **Notification Fatigue Is Real**
Initially, every detection triggered a push notification, flooding my phone with alerts for passing cars and stray cats. By adding a simple confidence threshold and a “time‑of‑day” filter in Home Assistant, I reduced noise by 80 %.
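
In my setup that logic lives in Home Assistant, but the decision itself is simple enough to express as a predicate. The 0.7 threshold and the quiet-hours window below are illustrative values only, not the exact rules I ended up with:

```python
from datetime import datetime, time

CONFIDENCE_THRESHOLD = 0.7                         # drop low-confidence detections
QUIET_START, QUIET_END = time(22, 0), time(6, 0)   # overnight window

def should_notify(label: str, confidence: float, when: datetime) -> bool:
    """Decide whether a detection deserves a push notification."""
    if confidence < CONFIDENCE_THRESHOLD:
        return False
    after_dark = when.time() >= QUIET_START or when.time() <= QUIET_END
    # Passing cars are only interesting after dark; people always alert.
    if label == "car" and not after_dark:
        return False
    return True

print(should_notify("person", 0.85, datetime(2026, 1, 10, 14, 0)))  # True
print(should_notify("car", 0.90, datetime(2026, 1, 10, 14, 0)))     # False: daytime car
```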

#### 4. **Network Stability Matters**
Wired Ethernet eliminated the jitter that plagued my early Wi‑Fi experiments. The only hiccup was a mis‑wired patch panel that caused occasional packet loss; a quick audit resolved the issue.

#### 5. **Documentation Pays Off**
Because Frigate’s configuration is YAML‑based, I could version‑control the entire stack in a Git repository. When a change broke the FFmpeg pipeline, a `git revert` restored the previous working state in minutes.

---

### Future Enhancements – Where to Go From Here

- **GPU Upgrade** – Adding a dedicated inference accelerator (e.g., an Intel Arc or NVIDIA RTX) to improve detection speed and lower CPU load.
- **Dynamic Prompt Generation** – Using a small LLM to craft context‑aware prompts based on the time of day, weather, or known events (e.g., “delivery” vs. “visitor”).
- **Smart Notification Decision Engine** – Training a lightweight classifier that decides whether an alert is worth sending, based on historical user feedback.
- **Edge‑Only Model Updates** – Caching Hugging Face models locally and scheduling updates during off‑peak hours to eliminate any internet dependency after the initial download.
- **Multi‑Camera Correlation** – Linking detections across cameras to track a moving object through the property, enabling a “follow‑the‑intruder” view.

---

### A Personal Note – The Roof, the Cables, and My Dad

All the technical wizardry would have been for naught if I hadn’t managed to get Ethernet cables from the house’s main distribution board up to the roof where the cameras sit. I’m decent with Docker, YAML, and LLM prompts, but I’m hopeless when it comes to climbing ladders and threading cables through roof joists.

Enter my dad. He spent an entire Saturday hauling a coil of Cat‑6, pulling the cables into the roof space while I fumbled with the tools. He didn’t care that I’d rather be writing code than wielding a hammer; there were apparently four days of pain afterwards, so please know the help was truly appreciated. The result is a rock‑solid wired backbone that keeps the cameras streaming without hiccups.

Thank you, Dad. Your patience, muscle, and willingness to get your hands dirty made this whole system possible.

---

### Bringing It All Together – The Architecture

<img alt="CCTV Architecture" height="auto" width="100%" src="{attach}/images/CCTV_ARCH.png">

---

### Closing Thoughts

Building an AI‑enhanced CCTV system from the ground up has been a rewarding blend of hardware tinkering, software orchestration, and a dash of machine‑learning experimentation. The result is a **privacy‑first, locally owned surveillance platform** that does more than just record—it understands. It can answer natural‑language queries, send context‑rich alerts, and integrate seamlessly with a broader home‑automation ecosystem.

If you’re a hobbyist, a small‑business owner, or anyone who values data sovereignty, the stack described here offers a solid foundation. Start with a single camera, get comfortable with Frigate’s YAML configuration, and gradually layer on the AI components. Remember that the most valuable part of the journey is the learning curve: each tweak teaches you something new about video streaming, inference workloads, and the quirks of your own network.

So, roll up your sleeves, grab a ladder (or enlist a dad), and give your home the eyes it deserves—without handing the footage over to a faceless cloud. The future of home surveillance is local, intelligent, and, most importantly, under your control. Cheers!
@@ -1,31 +0,0 @@
Title: Google AI is Rising
Date: 2025-12-21 20:00
Modified: 2025-12-23 10:00
Category: AI
Tags: AI, Google, Tech
Slug: google-ai-is-rising
Authors: Andrew Ridgway
Summary: After a period of seeming hesitation, one tech giant is now a serious contender in the AI race. Leveraging its massive and uniquely personal datasets – gleaned from widely used services like search, email, and calendars – it’s releasing models that are quickly challenging existing benchmarks. This arrival is significant, creating a more competitive landscape and potentially pushing innovation forward. However, it also highlights crucial privacy concerns given the depth of data access. The company’s recent open-source contributions suggest a multifaceted approach, but users should be mindful of data control and consider diversifying their digital footprint.

# Google AI is Rising

The landscape of Artificial Intelligence is shifting, and a familiar name is finally asserting its dominance. For a while there, it felt like Google was… well, lagging. Given the sheer volume of data at its disposal, it was a surprise to many that they weren’t leading the charge in Large Language Models (LLMs). But the moment appears to have arrived. Google seems to have navigated its internal complexities and is now delivering models that are genuinely competitive, and in some cases, surpassing the current benchmarks.

The key to understanding Google’s potential lies in the data they’ve accumulated. Consider the services we willingly integrate into our daily lives: email through Gmail, scheduling with Google Calendar, advertising interactions, and of course, the ubiquitous Google Search. Crucially, we provide this data willingly, often tied to a single Google account. This isn’t just a large dataset; it’s a *targeted* dataset, offering an unprecedented level of insight into individual behaviours and preferences.

This data advantage is now manifesting in the performance of Gemini, Google’s latest LLM. Recent discussions within the tech community – on platforms like [Hacker News](https://news.ycombinator.com/item?id=46301851) and [Reddit](https://www.reddit.com/r/singularity/comments/1p8sd2g/experiences_with_chatgpt51_vs_gemini_3_pro/) and [Reddit](https://www.reddit.com/r/GeminiAI/comments/1p953al/gemini_seems_to_officially_be_better_than_chatgpt/) – suggest Gemini is rapidly gaining ground, and in some instances, exceeding the capabilities of established models.

Google’s history is one of immense scale and profitability, exceeding the GDP of many nations. This success, however, has inevitably led to the creation of large, protective bureaucracies. While necessary for safeguarding revenue streams, these structures can stifle innovation and slow down decision-making. Ideas often have to navigate multiple layers of management, sometimes overseen by individuals whose expertise lies in business administration rather than the intricacies of neural networks and algorithmic functions.

The arrival of a truly competitive Google model is a significant development. OpenAI, previously considered the frontrunner, now faces a formidable challenge. Furthermore, Anthropic is gaining traction amongst developers, with many preferring their models for coding assistance. This shift suggests a growing demand for tools tailored to specific professional needs.

It’s important to acknowledge that neither Google nor OpenAI are inherently benevolent entities. However, with Google now fully engaged in the LLM race, the potential implications are considerable. Gemini’s access to deeply personal data – email content, calendar events, even metadata – raises legitimate privacy concerns. It’s a sobering thought to consider the extent of data visibility Google possesses, particularly when we don’t directly own the services we use. This reality strengthens the argument for greater data control and the exploration of self-hosted alternatives.

Google’s commitment to open-source initiatives, demonstrated through the release of the Gemma models (which, incidentally, powered the creation of this very blog), signals a broader strategy. The technology is here, it’s evolving rapidly, and its influence will only continue to grow.

While complete resistance may be unrealistic, individuals can take steps to mitigate potential risks. Fragmenting your data across different services, diversifying email providers, and avoiding single sign-on (SSO) with Google are all proactive measures that can help reclaim a sense of control. (Though, let’s be honest, anyone still using Chrome is already operating within a highly monitored ecosystem.)

The future of AI is unfolding quickly, and Google is now a major player. It’s a development that warrants careful consideration, and a renewed focus on data privacy and digital autonomy.
@@ -1,87 +0,0 @@
Title: GPT OSS - Is It Embrace, Extend, Extinguish
Date: 2025-08-12 20:00
Modified: 2025-08-14 20:00
Category: Politics, Tech, AI
Tags: politics, tech, ai
Slug: gpt-oss-eee
Authors: Andrew Ridgway
Summary: GPT OSS is here from OpenAI, the first open weight model from them since GPT-2. My question is... why now?

# Human Introduction
This has been a tough one for the publishing house to get right. I've had it generate 3 different drafts and this is still the result of quite the edit. Today's blog was written by:

1. Gemma:27b - Editor
2. GPT-OSS - Journalist
3. Qwen3:14b - Journalist
4. phi4:latest - Journalist
5. deepseek-r1:14b - Journalist

The big change from last time is the addition of gpt-oss, which is of course the focus of the topic today. It's quite the open weight model; I haven't played with the tooling yet but I'm excited to see what it can do, even if I do have questions.

Anyways, without further ado! GPT-OSS: is it EEE? Written by AI... for AI?

# GPT OSS - Is It EEE?

## Introduction: The Return of OpenAI (With Some Questions)

This week, the AI world got a bit busier than usual. OpenAI dropped their [**GPT-OSS**](https://openai.com/index/introducing-gpt-oss/) models, and it feels like they’re trying to make up for lost time—or maybe just remind everyone that they’re still in the game. The release has sparked a lot of excitement, but also some confusion. Are these models really as good as they claim? And why now? Let’s break this down with all the drama, intrigue, and a dash of humor you’ve come to expect from your friendly neighborhood tech writer.

## What Exactly Is GPT-OSS Anyway?

OpenAI has thrown two models into the ring:

1. **GPT-oss-120b**: A hefty 120 billion parameter model that they’re claiming can “hold its own” against their own **o4-mini** (which is *incredibly* expensive to run). The kicker? It apparently does this on a single 80GB GPU. That’s impressive if true, but let’s not get carried away just yet.
2. **GPT-oss-20b**: The smaller sibling that’s currently helping me draft this very blog post. OpenAI says it’s on par with their **o3-mini** and can run on a measly 16GB of memory. That makes it perfect for edge devices, local inference, or when you don’t want to spend your life savings on cloud credits.

Both models are also supposed to be ace at tool use, few-shot function calling, CoT reasoning, and even health-related tasks—outperforming some proprietary models like GPT-4 in certain cases. Impressive? Sure. But let’s not forget that OpenAI has a history of making bold claims.

## The Great AI Model Exodus: Why We’re Here

Over the past year or so, the AI community has been moving away from GPT-based models—not because they were bad (they weren’t), but because they were closed-source and expensive to use at scale. Developers wanted more control, transparency, and affordability. Enter the rise of open-source and open-weight models like:

* **Google’s Gemini (Gemma)** series
* **Microsoft’s Phi** series (yes, that Microsoft—ironically, OpenAI is a subsidiary)
* The **Qwen** series
* And others like **Llama** and **Deepseek**

These models have been a breath of fresh air for developers. They’re free to use, tweak, and integrate into projects without worrying about pesky API limits or astronomical costs. It’s like the AI world finally got its own version of Linux—except with neural networks. But then OpenAI showed up with GPT-OSS. And now everyone is asking: Why?

## Is This an Embrace-Extend-Extinguish Play?

Ah, the classic **Embrace, Extend, Extinguish** strategy. If you’re not familiar, it’s a business tactic where a company adopts (embrace) an existing standard or technology, extends it with their own features, and then slowly extinguishes the competition by making their version incompatible or superior.

Now, I’m not accusing OpenAI of anything here—just pointing out that they’re a Microsoft subsidiary, and Microsoft has a history of such strategies. Whether this is intentional or just good business sense is up for debate. But let’s think about it:

* OpenAI has dominated the consumer AI market with their **ChatGPT** and other tools.
* They’ve been losing ground in the developer market, where models like [Gemini](https://deepmind.google/models/gemini/pro/) and particularly [Claude (Anthropic)](https://claude.ai/) are gaining traction in the proprietary space.
* Now they’re releasing open weight models that promise to compete at GPT-4 levels to try and bring in the Deepseek and Qwen crowd.

The timing feels a bit too convenient. OpenAI is essentially saying: “We get it. You want local, affordable, and flexible AI? We’ve got you covered.” But will this be enough to win back the developer community? Or are they just delaying the inevitable?

## The Real Power of Local Models

Let’s not sugarcoat it: For developers, the real value of AI isn’t in chatbots or viral social media trends. It’s in building tools that can automate, analyze, and enhance existing workflows. Think:

* Summarizing thousands of documents in seconds.
* Automating customer support with natural language processing.
* Creating dynamic content for apps and websites on the fly.

This is where AI shines—and where OpenAI has been losing market and mind share. Their focus on consumer-facing tools like ChatGPT has made them a household name, but it’s also left developers feeling overlooked. Now, with GPT-OSS, OpenAI is trying to bridge that gap. But will they succeed? Or are they just too late to the party?

## The Dark Side of Monocultures

One thing I’m deeply concerned about is the potential for a monoculture in AI. If OpenAI manages to dominate the open-source space with GPT-OSS, we could end up in a world where everyone uses variations of the same model. It’s not just about vendor lock-in—it’s about stifling innovation. When every developer uses the same tools and approaches, we lose the diversity that drives progress.

I want to see a future where there are **multiple open-source or at the very least open weight models**, each with their own strengths and weaknesses. That way, developers can choose what works best for their needs instead of being forced into one ecosystem.

## Testing the Waters: My Journey With GPT-OSS

This blog post was partly written by GPT-oss-20b. It’s fast, it’s local, and it’s surprisingly good at generating content. But is it better than open weight alternatives like Deepseek or Gemma (the open weight Gemini)? That’s the million-dollar question.

I’ve been testing out various models for my own projects, and I can say this much: GPT-OSS feels like a solid contender. It’s fast, easy to integrate, and—dare I say it—fun to work with. But until I put it head-to-head with other models, I won’t be ready to crown it the king of AI.

## Final Thoughts: The Future of AI is in Our Hands

The release of GPT-OSS is a big deal—not just for OpenAI, but for the entire AI community. It’s a reminder that even closed-source giants can (and should) listen to their users. But let’s not get carried away. OpenAI isn’t the only game in town anymore. Models like Gemini, Claude in the proprietary space, and Qwen and Llama in open source space are proving that diversity is key to innovation.

As developers, we have the power to choose which models succeed—and by extension, shape the future of AI. Let’s make sure we’re making choices that benefit the community as a whole, not just a single company. After all, the last thing we need is another **AI monoculture**.
Binary file not shown. (Before: 201 KiB)
Binary file not shown. (Before: 212 KiB)
Binary file not shown. (Before: 292 KiB)
@ -1,114 +0,0 @@
|
||||
Title: Integrating Ollama and Matrix with Baibot
|
||||
Date: 2025-06-25 20:00
|
||||
Modified: 2025-06-30 08:00
|
||||
Category: AI, Data, Matrix
|
||||
Tags: ai, kubernetes, matrix
|
||||
Slug: ollama-matrix-integration
|
||||
Authors: Andrew Ridgway
|
||||
Summary: Integrating a Local LLM to a personal matrix server all the fun AND data sovereignty
|
||||
|
||||
### _Human Introduction_
|
||||
I've been experimenting with AI and integrations I'm particuarly excited by the idea of using LLM's to integrate between different systems (Stay tuned for a blog [MCP](https://modelcontextprotocol.io/introduction) at some point in the future!)
|
||||
|
||||
Below I've thrown together some notes and had AI build a very quick how to on a cool little project that took next to no time to put together that I thought might be interesting for the group.. Enjoy!
|
||||
|
||||
|
||||
|
||||
# Matrix AI Integrations with baibot: A Fun Journey into Home Automation and LLMs
|
||||
|
||||
Alright, so I’ve been messing around with this cool project called **baibot**, which is a locally deployable bot for integrating Large Language Models (LLMs) into Matrix chatrooms. If you’re anything like me, you run your own Matrix server to keep things private and under control—whether it’s for family communication or interacting with the tech community. But one day, I thought, “Why not have my LLMs right where I’m already managing everything else?” Enter baibot.
|
||||
|
||||
**Setting Up My Own Matrix Server with baibot**
|
||||
|
||||
First off, I’ve got a home Matrix server running Element. Integrating baibot into this environment makes sense because it allows me to connect directly via the same platform. The key was getting the configuration right using examples from [baibot’s GitHub](https://github.com/etkecc/baibot/blob/main/docs/sample-provider-configs/ollama.yml). For instance, connecting to an Ollama gemma3 model with a specific prompt ensures it’s lighthearted yet responsive:
|
||||
|
||||
```yaml
|
||||
base_url: http://<my_ollama_ip>:11434/v1
|
||||
text_generation:
|
||||
model_id: gemma3:latest
|
||||
prompt: 'You are a lighthearted bot...'
|
||||
temperature: 0.9
|
||||
max_response_tokens: 4096
|
||||
max_context_tokens: 128000
|
||||
```
|
||||
|
||||
This gives me precise control over the bot’s behavior, ensuring each instance in Matrix rooms behaves exactly as intended.
|
||||
|
||||
**Deploying to Kubernetes**
|
||||
|
||||
To ensure reliability, I used Kubernetes. Here's a breakdown of the key files:
|
||||
|
||||
* **Deployment.yaml**: Manages pod replicas, security contexts, and volume mounts for persistence.
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: ridgway-bot
|
||||
name: ridgway-bot
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- image: ghcr.io/etkecc/baibot:v1.7.4
|
||||
name: baibot
|
||||
volumeMounts:
|
||||
- name: ridgway-bot-cm
|
||||
mountPath: /app/config.yml
|
||||
- name: ridgway-bot-pv
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: ridgway-bot-cm
|
||||
configMap:
|
||||
name: ridgway-bot
|
||||
- name: ridgway-bot-pv
|
||||
persistentVolumeClaim:
|
||||
claimName: ridgway-bot-storage
|
||||
```
|
||||
|
||||
* **Persistent Volume Claim (PVC)** ensures data storage for baibot.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: ridgway-bot-storage
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: 500Mi
|
||||
```
|
||||
|
||||
The deployment script handles namespace creation, config maps, PVCs, and waits for the pod to be ready before copying data.

**Integrating with OpenWebUI for RAG**

Another cool aspect is integrating baibot with **OpenWebUI**, which acts as an OpenAI-compatible API. This allows me to leverage models I’ve created in OpenWebUI that include knowledge bases (RAG). The config here uses OpenWebUI’s endpoints:

```yaml
base_url: 'https://<my-openwebui-endpoint>/api/'
api_key: <my-openwebui-api-key>
text_generation:
  model_id: andrew-knowledge-base
  prompt: 'Your name is Rodergast...'
```

This setup lets me access RAG capabilities directly within Matrix chats, all without writing a single line of code. It’s like having my very own AI research assistant right there in the chatroom.

**Future Steps and Challenges**

Now that baibot is up and running, I’m already thinking about expanding its use cases. The next step might be integrating it with **Home Assistant** for alarm notifications or other automation tasks. However, my current setup uses an older gaming PC, which struggles with computational demands. This could lead to a rearchitecting effort—perhaps moving to a dedicated server or optimizing the hardware.

**Conclusion**

Baibot has been a fantastic tool for experimenting with AI integrations in Matrix. By leveraging existing infrastructure and OpenWebUI’s capabilities, I’ve achieved full control over data privacy and customization. The next frontier is expanding these integrations into more practical applications like home automation. Stay tuned for updates!

**Final Thoughts**

It’s incredibly rewarding to see how open-source projects like baibot democratize AI access. Whether you’re a hobbyist or a pro, having tools that let you run LLMs locally without vendor lock-in is game-changing. If you’re interested in diving deeper, check out the [baibot GitHub](https://github.com/etkecc/baibot) and explore its documentation. Happy coding!
@ -1,93 +0,0 @@

Title: MCP and Ollama - Local Assistant is getting nearer
Date: 2025-07-24 20:00
Modified: 2025-07-24 20:00
Category: AI
Tags: tech, ai, ollama, mcp, ai-tools
Slug: mcp-ollama-local-assistant-soon
Authors: Andrew Ridgway
Summary: An Exploration of the Model Context Protocol and its potential to revolutionise how we interact with AI

## Human Introduction

So for today's blog I've upped the model parameters on both the editor and a couple of the drafters... and I have to say I think we've nailed what my meagre hardware can achieve in terms of content production. The process takes 30 minutes more than before to churn through, but the quality of the output more than makes up for it. For context we are now using:

- _Editor_: Gemma3:27b
- _Journalist 1_: phi4-mini:latest
- _Journalist 2_: phi4:latest
- _Journalist 3_: deepseek-r1:14b <-> _I know, but it **is** good even if it won't talk about Tiananmen Square_
- _Journalist 4_: qwen3:14b

As you can see if you compare some of the other blogs, this blog has really nailed tone and flow. Some of the content was wrong... it thought I "wrote" [MCPO](https://github.com/open-webui/mcpo), which I didn't, I wrapped it, and the sign-off was very cringe, but otherwise the blog is largely what came out from the editor.

As I get better hardware and can run better models, I fully see this being something that could potentially not need much editing on this side... have to see how it goes moving forward. Anyways, without further ado, behold... MCP and Ollama - A blog _**about**_ AI _**by**_ AI

## Introduction: Beyond the Buzzwords – A Real Shift in AI

For the last couple of weeks, I’ve been diving deep into **MCP** – both for work and personal projects. It’s that weird intersection where hobbies and professional life collide. Honestly, I was starting to think the whole AI hype was just that – hype. But MCP? It’s different. It’s not just another buzzword; it feels like a genuine shift in how we interact with AI. It’s like finally getting a decent internet connection after years of dial-up.

The core of this change is the **Model Context Protocol** itself. It’s an open specification, spearheaded by **Anthropic**, but rapidly gaining traction across the industry. Google’s thrown its weight behind it with [MCP Tools](https://google.github.io/adk-tools/mcp-tools/), and Amazon’s building it into [Bedrock Agent Core](https://aws.amazon.com/bedrock/agent-core/). Even Apple, with its usual air of exclusivity, is likely eyeing this space.

## What *Is* MCP, Anyway? Demystifying the Protocol

Okay, let’s break it down. **MCP** is essentially a standardized way for **Large Language Models (LLMs)** to interact with **tools**. Think of it as giving your AI a set of keys to your digital kingdom. Instead of just *talking* about doing things, it can actually *do* them.

Traditionally, getting an LLM to control your smart home, access your code repository, or even just send an email required a ton of custom coding and API wrangling. MCP simplifies this process by providing a common language and framework. It’s like switching from a bunch of incompatible power adapters to a universal charger.

The beauty of MCP is its **openness**. It’s not controlled by a single company, which fosters innovation and collaboration. It’s a bit like the early days of the internet – a wild west of possibilities.
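
To make that a little more concrete: underneath, it's JSON-RPC. A client asks a server what tools it has, then asks it to call one. Here's a rough sketch of the message shapes (the tool name and arguments are made up for illustration):

```bash
# Rough sketch of the two core MCP interactions as JSON-RPC 2.0 messages.
# "turn_on_light" and its arguments are hypothetical, just to show the shape.
cat <<'EOF'
{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}
{"jsonrpc": "2.0", "id": 2, "method": "tools/call",
 "params": {"name": "turn_on_light", "arguments": {"room": "office"}}}
EOF
```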

## My MCP Playground: Building a Gateway with mcpo

I wanted to get my hands dirty, so I built a little project wrapping [**mcpo**](https://github.com/open-webui/mcpo) in a container that can pull in config to create a containerised service. It’s a gateway that connects **OpenWebUI** – a fantastic tool for running LLMs locally – with various **MCP servers**.

The goal? To create a flexible and extensible platform for experimenting with different AI agent tools within my build pipeline. I wanted to be able to quickly swap out different models, connect to different services, and see what happens. It’s a bit like having a LEGO set for AI – you can build whatever you want.

You can check out the project [here](https://git.aridgwayweb.com/armistace/mcpo_mcp_servers). If you’re feeling adventurous, I encourage you to clone it and play around. I’ve got it running in my **k3s cluster** (a lightweight Kubernetes distribution), but you can easily adapt it to Docker or other containerization platforms.
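
If you want to kick the tyres outside of Kubernetes, something like the following should be close; the image name, the port, and the assumption that the repo ships a Dockerfile are mine, so check the repo's README for the real build and run steps:

```bash
# Sketch of running the gateway with plain Docker instead of k3s.
# Assumes a Dockerfile at the repo root and that the service listens on 8000;
# the image tag is arbitrary. See the repo README for the actual instructions.
git clone https://git.aridgwayweb.com/armistace/mcpo_mcp_servers
cd mcpo_mcp_servers
docker build -t mcpo-gateway .
docker run --rm -p 8000:8000 mcpo-gateway
```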

## Connecting the Dots: Home Assistant and Gitea Integration

Right now my wrapper supports two key services: **Home Assistant** and **Gitea**.

**Home Assistant** is my smart home hub – it controls everything from the lights and thermostat to the security system. Integrating it with mcpo allows me to control these devices using natural language commands. Imagine saying, “Hey AI, dim the lights and play some jazz,” and it just happens. It’s like living in a sci-fi movie.

**Gitea** is my self-hosted Git service – it’s where I store all my code. Integrating it with mcpo allows me to use natural language to manage my repositories, create pull requests, and even automate code reviews. It’s like having a personal coding assistant.

I initially built a custom **Gitea MCP server** to get familiar with the protocol. But the official **Gitea-MCP** project ([here](https://gitea.com/gitea/gitea-mcp)) is much more robust and feature-rich. It’s always best to leverage existing tools when possible.

Bringing in new MCP servers should be as simple as updating the config to provide a new endpoint and, if using stdio, updating the build script to bring in the MCP binary or the git repo with the MCP implementation you want to use.
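
As a rough illustration of what that config change looks like with upstream mcpo's Claude-style `--config` file (my wrapper's config handling differs a little, and both the Home Assistant URL and the Gitea binary path below are placeholders):

```bash
# Sketch: add a new MCP server to an mcpo-style JSON config, then restart.
# The endpoints and binary path are illustrative placeholders only.
cat > config.json <<'EOF'
{
  "mcpServers": {
    "home-assistant": { "type": "sse", "url": "http://<home-assistant-host>:8123/mcp_server/sse" },
    "gitea": { "command": "/opt/mcp/gitea-mcp", "args": ["--transport", "stdio"] }
  }
}
EOF
uvx mcpo --port 8000 --config config.json
```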

## The Low-Parameter Model Challenge: Balancing Power and Efficiency

I’m currently experimenting with **low-parameter models** like **Qwen3:4B** and **DeepSeek-R1:14B**. These models are relatively small and efficient, which makes them ideal for running on local hardware. However, they also have limitations.

One of the biggest challenges is getting these models to understand complex instructions. They require very precise and detailed prompts. It’s like explaining something to a child – you have to break it down into simple steps.

Another challenge is managing the context window. These models have a limited memory, so they can only remember a certain amount of information. This can make it difficult to have long and complex conversations.

## The Future of AI Agents: Prompt Engineering and Context Management

I believe the future of AI lies in the development of intelligent **agents** that can seamlessly interact with the world around us. These agents will need to be able to understand natural language, manage complex tasks, and adapt to changing circumstances.

**Prompt engineering** will be a critical skill for building these agents. We’ll need to learn how to craft prompts that elicit the desired behavior from the models. It's almost like coding, but with far less structure and no need to understand the "syntax". But we're a long way from there yet.

**Context management** will also be crucial. We’ll need to develop techniques for storing and retrieving relevant information, so the models can make informed decisions.

## Papering Over the Cracks: Using MCP to Integrate Legacy Systems

At my workplace, we’re exploring how to use MCP to integrate legacy systems. Many organizations have a patchwork of different applications and databases that don’t easily communicate with each other.

MCP can act as a bridge between these systems, allowing them to share data and functionality. It’s like building a universal translator for your IT infrastructure.

This can significantly reduce the cost and complexity of integrating new applications and services, if we get the boilerplate right.

## Conclusion: The Dawn of a New Era in AI

MCP is not a silver bullet, but it’s a significant step forward in the evolution of AI. It provides a standardized and flexible framework for building intelligent agents that can seamlessly interact with the world around us.

I’m excited to see what the future holds for this technology. I believe it has the potential to transform the way we live and work.

If you’re interested in learning more about MCP, I encourage you to check out the official website ([https://modelcontextprotocol.io/introduction](https://modelcontextprotocol.io/introduction)) and explore the various projects and resources that are available.

And if you’re feeling adventurous, I encourage you to clone my mcpo project ([https://git.aridgwayweb.com/armistace/mcpo_mcp_servers](https://git.aridgwayweb.com/armistace/mcpo_mcp_servers)) and start building your own AI agents.

It's been a bit of a ride. Hopefully I'll get a few more projects that can utilise some of these services, but with so much new stuff happening my 'ooo squirrel' mentality could prove a bit of a headache... might be time to crack open the blog_creator and use CrewAI and MCP to create some research assistants on top of the drafters and editor!

Talk soon!
@ -1,93 +0,0 @@

Title: Recovering Archlinux Qemu VM in Proxmox
Date: 2025-07-01 20:00
Modified: 2025-07-01 20:00
Category: SysAdmin
Tags: System Admin, Proxmox, Qemu, Arch, Kubernetes
Slug: recovering-arch-vm-proxmox
Authors: Andrew Ridgway
Summary: An absolute nightmare of a day trying to recover my kube cluster from a silly update error

### Human Edit

This is probably the most amazing run of the blog creator. I've started using the new gemma3n and also upgraded the box Ollama runs on so it can run slightly bigger models. Using phi4 and gemma:27b has produced some amazing results, see below.

I *did* need to update some of the pacman stuff as it conflated two separate issues, so bear in mind I have made some little edits in that section, but otherwise... this is straight from the mouth of the LLM. Enjoy!

# Recovering an Archlinux QEMU VM in Proxmox: A Day in Hell and Back Again

Well that was a morning. Today I wanted to try and fix my Longhorn installation in Kube... (again 😥). It turns out, things didn't go as planned.

## The Unexpected Downfall

I went to perform my usual update and reboot... except today for whatever reason, the upgrade decided to fail to install the kernel and left me with an unbootable system.

### Dropping into Grub Rescue

At this point I dropped back down to grub rescue mode (which is always fun). Honestly? I hate that environment! And then it hit me: these systems are just QEMU disks, right? Surely I can mount them, chroot in, and fix the install.

## The Quest for Recovery

It took 2 hours of frantic Googling through Proxmox and Arch forums until I stumbled upon something... almost magical.

### Mounting QEMU Disks Made Easy

I found an amazing suite of tools to make mounting these qemu disks a breeze. Check out this [guide](https://www.howtogeek.com/devops/how-to-mount-a-qemu-virtual-disk-image/) for all the details on libguestfs-tools and guestmount.

#### Mounting in Action

```bash
# Install the libguestfs tooling on the Proxmox (Debian) host
sudo apt install libguestfs-tools

# Mount the VM's root partition from the raw disk image
# (the mountpoint has to exist first)
mkdir -p /tmp/kube_disk/
sudo guestmount --add /var/lib/pve/local-btrfs/images/100/vm-100-disk-0/disk.raw --mount /dev/sda3 /tmp/kube_disk/
```

### Enter Chroot Land

Now that I've got my disk mounted, it's time to chroot in. But hold up! I need it as root this time.

#### Setting Up Arch-Chroot

```bash
# The Debian/Ubuntu package that provides arch-chroot is arch-install-scripts
sudo apt install arch-install-scripts
sudo arch-chroot /tmp/kube_disk/
```

### Pacman: The Hero We Deserve (But Need Permission)

Oh boy, pacman threw 23 million permission errors my way. Last year they changed it to download as a separate unprivileged user by default… but I found out you can turn off the `DownloadUser` flag in `/etc/pacman.conf`. Here's how:

```bash
# Comment out DownloadUser so pacman doesn't try to drop privileges
# inside the chroot (re-enable it once you're back on a booting system)
sed -i 's/^DownloadUser/#DownloadUser/' /etc/pacman.conf
pacman -Syu
```

I did have a couple of issues installing the kernel (which is what got borked in the update):

```bash
# Sometimes some files got written, so use --overwrite to get rid of them.
# Be warned: this *could* be destructive.
pacman -S linux --overwrite "*"
```

### Clean Up and Exit

Once we're done, we need to exit the chroot. Remember that crucial step: unmounting correctly.

```bash
# Leave the chroot, then unmount the guest disk from the host
exit
sudo umount /tmp/kube_disk/
```

## The Reboot Saga (And How Not To Do It)

Reboot was supposed to be a smooth sail… but I made one fatal mistake.

### Corruption Nightmare

I didn't unmount before starting the VM. This led me down an unfortunate path of corrupting my btrfs partition beyond recognition and having to rebuild not just my master node, but the *entire* cluster! Backups saved the day... barely!

#### Lessons Learned

* **Never** reboot without first properly unmounting (a quick check like the one below saves a lot of pain).
* Seriously need more backups for those images. 🚀
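
A minimal sanity check before powering the VM back on; the mountpoint path is just the one assumed from the guestmount step earlier:

```bash
# Refuse to start the VM while the guest disk is still mounted on the host
if mountpoint -q /tmp/kube_disk/; then
  echo "guest disk still mounted - unmount it before starting the VM"
else
  echo "safe to start the VM"
fi
```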

## Conclusion: A Cluster-Fucked Day Turned Into a Learning Experience

All in all it was chaos, but hey – learning happens at 2 AM after midnight reboots and frantic Googling. Hope this helps anyone else stuck with Proxmox woes! 🚀
@ -1,53 +0,0 @@

Title: The Failing Social Media Ban
Date: 2025-06-19 20:00
Modified: 2025-06-20 20:00
Category: Politics
Tags: politics, social media, tech policy
Slug: social-media-ban-fail
Authors: Andrew Ridgway
Summary: The Social Media ban is an abject failure of policy. Education and the use of the much better existing tools is the key

## 🎯 The Goal: A Legal Framework to Protect Kids

The Australian government’s, or should I say Julie Inman's, plan to ban social media for teens has sparked ongoing debate. While the intention is noble—protecting minors from online risks—it’s clear the technical and legal hurdles are massive. The government's concept of relying on “facial aging” or “Proof of Age” APIs is prone to privacy violations and data breaches. Parents already have tools that let them make decisions about their children’s tech use without needing to hand over photos of their ID. The government's current approach is mired in bureaucracy, and the tech world does not thrive in that environment. Instead of trying to outsource the problem to consultants, the government should **educate parents on the tools already available**.

## 🧩 The Problem: Tech Giants Won’t Do It

The government’s plan to enable Inman's vision is to use facial recognition or “age-based” filters. This was flawed from the start. These systems are expensive, unreliable, and not designed for the scale of a national rollout. Even if a company like Meta or Google could do it, they’d **never** do it, for one simple reason: **there is no money in the equation**. The only alternative is to outsource to consultants, but those consultants are not equipped to handle the complexity. The government’s plan is a joke: no one is going to build a system that’s 100% accurate, secure, and compliant with privacy laws, and those that maybe could have no incentive to. No amount of chest thumping by the E-Safety Commissioner will change this fact, and throwing frankly meaningless pieces of paper from our legislative assembly will do little more than make them laugh.

## 🛠️ The Tools Parents Already Have

Parents ([Is it parents? Is it in fact fiefdom creation on behalf of Julie Inman?](https://minister.infrastructure.gov.au/rowland/media-release/record-investment-improve-safety-australians-online)) must give up on the idea of the government fixing this. Instead, parents should be using the **tools already in their homes**. These tools are **free, secure, and effective**. Some examples include (and I use these in my own home):

* **Fritz Box Parental Controls** (https://en.fritz.com/service/knowledge-base/dok/FRITZ-Box-7530/8_Restricting-internet-use-with-the-FRITZ-Box-parental-controls/) - Allows blocking of websites and apps, setting time limits, and creating user profiles.
* **Microsoft Family Safety** (https://www.microsoft.com/en-au/microsoft-365/family-safety) - Provides screen time limits, content filters, and activity reporting.
* **Nintendo Parental Controls** (https://www.nintendo.com/au/apps/parental-controls/) - Allows managing game time, content restrictions, and communication settings on Nintendo devices.
* **Google Family Link** (https://families.google.com/familylink/) - Enables remote monitoring, app management, and location tracking for children's Android devices.
* **Apple Family Sharing** (https://support.apple.com/en-au/105121) - Allows sharing purchases, subscriptions, and location information with family members.

These tools let parents **block apps, limit screen time, and monitor online activity** without needing to share sensitive data. They offer parents full control over what is available and are not dependent on some arbitrary list governed in legislation (which is in and of itself an indicator of how backwards this legislation is).

## 📚 The Real Solution: Education, Not Tech

The government’s plan is a **mistake**. Instead of trying to build a new system, parents should be **educating themselves on the tools already available**.

### 🔄 Flexibility for Every Family

* **Approved apps**
* **Blacklisted content**
* **Screen time limits**
* **Privacy controls**

These tools let parents **make decisions tailored to their children’s needs**. No one-size-fits-all approach. It gives parents autonomy over their online decision making whilst better respecting everyone's privacy, including the child's. Already Julie is making calls to expand the list. This is unacceptable; it is no one's choice but MINE what is acceptable in my house and for my family.

## 🧩 Why the Government’s Plan Fails

The government’s plan is a **disaster**. It’s not about fixing the problems of social media use in teens; it’s about giving the perception they are doing something about it, using archaic methods and tools that don't go to the root cause. The tools parents already have are **better, cheaper, and more secure**. The only way to make this work is for the government to **stop trying to solve a social problem with tech** and **focus on the real solution: education and parental autonomy**. Stop letting Julie create her cartel and her own version of the Chinese firewall.

## 📝 Summary: The Right Tools, Not the Tech

The government’s plan is a dead monkey. Instead of trying to build a system that’s 100% accurate and secure, parents should be using the **tools already in their homes**. These tools are **free, effective, and preserve privacy**. They let parents **make decisions about their children’s tech use on a true case-by-case basis** without needing to hand over sensitive data.

## 🧩 Final Thoughts

The Government's plan, at the behest of Julie Inman, is a **disaster**. It’s not about fixing the problem with social media; it’s about creating the perception they are solving a problem that is already solved. [The E-Safety Commissioner has made clear her plans are to take control out of our hands when it comes to what we can do online](https://www.esafety.gov.au/newsroom/media-releases/online-industry-asked-address-esafetys-concerns-draft-codes-0#:~:text=Online%20industry%20asked%20to%20address%20eSafety%27s%20concerns%20with%20draft%20codes,-Share&text=Australia%27s%20eSafety%20Commissioner%20has%20asked,safeguards%20for%20users%20in%20Australia.). Parents should be using the **tools already in their homes**. The real solution is not to expect a government to fix this, but to **educate themselves on the tools that already exist**. Until we accept that this is our responsibility, the problem will continue to propagate, because the only place it can be fixed is in the home and not by Julie Inman.
@ -1,16 +1,5 @@

Title: When to use AI
Date: 2025-06-05 20:00
Modified: 2025-06-06 08:00
Category: AI, Data
Tags: ai, python
Slug: when-to-use-ai
Authors: Andrew Ridgway
Summary: Should we be using AI for ALL THE THINGS!?

# Human Introduction

Well... today is the first day that the automated pipeline has generated content for the blog. There's still a bit of work to do, including:

1. establishing a permanent vectordb solution (chromadb? pg_vector?)
2. Notification to Matrix that something has happened
3. Updating Trilium so that the note is marked as blog_written=true
@ -23,60 +12,43 @@ Anyways, without further ado, I present to you the first, pipeline written, AI c

---

# When to Use AI: Navigating the Right Scenarios

# When to use AI 😄

*A journalist, software developer, and DevOps expert’s take on when AI is overkill and when it’s just the right tool*

Okay, so I've been getting this question a lot lately: "When should we use AI?" or even more frustratingly, "Why can't AI do this?" It's like asking when to use a hammer versus a screwdriver. Sometimes AI is the perfect tool, other times it's better left in the toolbox. Let me break down some scenarios where AI shines and where it might not be the best bet.

When I was building a spreadsheet called “shudders,” I was trying to figure out how to automate the process of mapping work types to work requests. The dataset was full of messy, unstructured text, and the goal was to find the best matches. At first, I thought, “This is a perfect use case for AI!” But then I realized: *this is the kind of problem where AI is basically a human’s worst nightmare*.

## The Spreadsheet Dilemma: Where AI Can help, and where it hurts

So, let’s break it down.

**Scenario:** Mapping work types to categories in a spreadsheet with thousands of entries, like distinguishing between "Painting," "Repainting," "Deck Painting," or "Stucco Repainting."

### 🧠 When AI is *not* the answer

**Where AI Helps:**

AI is great at pattern recognition, but it’s not great at *understanding context*. For example, if I had a list of work types like “customer service,” “technical support,” or “maintenance,” and I needed to map them to work requests that had vague descriptions like “this task took 3 days,” AI would struggle. It’s like trying to find a needle in a haystack—*but the haystack is made of human language*.

* **Fuzzy Matching & Contextual Understanding:** AI excels at interpreting relationships between words (e.g., recognizing "Deck Painting" as a subset of "Painting"). However, traditional methods with regex or string manipulation fail here because they lack the nuanced judgment needed to handle ambiguity.

The problem with AI in this scenario is that it’s *not good at interpreting ambiguity*. If the work types are vague, the AI might mislabel them, leading to errors. Plus, when the data is messy, AI can’t keep up. I remember one time I tried to use a chatbot to classify work requests. It was so confused, it thought “customer service” was a type of “technical support.” 😅 The result? A spreadsheet full of “unknown” entries.

**Where AI Struggles:**

### 🧮 When AI *is* the answer

* **Precision Over Ambiguity:** Calculations requiring exact values (e.g., average durations) are better handled by deterministic algorithms rather than AI’s probabilistic approach.

There are some scenarios where AI is *definitely* the way to go. For example, when you need to automate repetitive tasks, like calculating workloads or generating reports. These tasks are math-heavy and don’t require creative thinking. Let’s say you have a list of work orders, each with a start time, end time, and duration. You want to calculate the average time per task. AI can do that with precision. It’s like a calculator, but with a personality.

**Traditional Methods Are Easier for Deterministic Problems:**

Another example: if you need to generate a report that summarizes key metrics, AI can handle that. It’s not about creativity, it’s about logic. And that’s where traditional programming shines.

* **Formula-Based Logic:** Building precise formulas for workload analysis relies on clear, unambiguous rules. AI can’t replace the need for human oversight in such cases.

### 🧪 The balance between AI and human oversight

## When AI Shines: Contextual and Unstructured Tasks

AI is a tool, not a replacement for human judgment. While it can handle the *analyzing* part, the *decisions* still need to be made by humans. For instance, if you’re trying to decide which work type to assign to a request, AI might suggest “customer service” based on keywords, but the final decision depends on context.

**Scenario:** Automating customer support with chatbots or analyzing social media sentiment.

So, in the end, AI is a *helper*, not a *replacement*. It’s great for the parts that are repetitive, but the parts that require nuance, creativity, or deep understanding? That’s where humans step in.

**Why AI Works Here:**

### 🧩 Final thoughts

* **Natural Language Processing (NLP):** AI understands context, tone, and intent in unstructured data, making it ideal for tasks like chatbot responses or content analysis.
* **Pattern Recognition:** AI identifies trends or anomalies in large datasets that humans might miss, such as predictive maintenance in industrial settings.

AI is like a superpower—great at certain things, not so great at others. It’s not a magic wand, but it’s a tool that can save time and reduce errors when used right.

**Why Traditional Methods Don't:**

So, when is it time to say “AI, nope”? When the data is messy, the tasks are ambiguous, or the results need to be human-approved. And when is it time to say “AI, yes”? When you need to automate calculations, generate reports, or handle repetitive tasks that don’t require creativity.

* **There is no easily discernable pattern:** If the pattern doesn't exist in a deterministic sense there will be little someone can do without complex regex and 'whack a mole' style programming.

### 🧩 Summary

## Hybrid Approaches: The Future of Efficiency

| Scenario | AI? | Reason |
|---|---|---|
| Ambiguous data | ❌ | AI struggles with context |
| Repetitive tasks | ✅ | AI handles math and logic |
| Creative decisions | ❌ | AI lacks the ability to think creatively |

While traditional methods remain superior for precise calculations, AI can assist in setting up initial parameters or generating insights. For example:

* **AI Proposes Formulas:** An LLM suggests a workload calculation formula based on historical data.
* **Human Checks Validity:** A human ensures the formula’s accuracy before deployment.

## Key Takeaways

1. **Use AI** for tasks involving:
    * Unstructured data (e.g., text, images).
    * Contextual understanding and interpretation.
    * Pattern recognition and trend analysis.
2. **Stick to Traditional Methods** for:
    * Precise calculations with deterministic logic.
    * Tasks requiring error-free accuracy (e.g., financial modeling).

## Conclusion

AI is a powerful tool but isn’t a one-size-fits-all solution. Match the right approach to the task at hand—whether it’s interpreting natural language or crunching numbers. The key is knowing when AI complements human expertise rather than replaces it.

**Final Tip:** Always consider the trade-offs between precision and context. For tasks where nuance matters, AI is your ally; for rigid logic, trust traditional methods.

🚀

In the end, AI is just another tool. Use it when it works, and don’t let it define your workflow. 😄 *And if you ever feel like AI is overstepping, remember: it’s just trying to be helpful. Sometimes it’s not the best choice. Sometimes it’s the only choice.*