Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
17 commits
Select commit Hold shift + click to select a range
d9eab36
ADDED: Default LLMProvider to APIDash Settings
synapsecode Apr 5, 2025
8a76d53
ADDED: LLMProviderCredentials to APIDash Settings
synapsecode Apr 5, 2025
4238ae0
AI Request Feature Foundations & CollectionStateExtraDetails
synapsecode May 16, 2025
459fe47
Migrated to Custom fork of Stac 0.10 modified to work with Flutter 3.32
synapsecode May 30, 2025
932e738
AI Request Pane Created & LLMModel Pre-Architecture Impl
synapsecode May 31, 2025
b38006e
LLMModelConfigValue is now mutable via getter/setter
synapsecode May 31, 2025
5931d47
LLMModel: Extended Functionality
synapsecode May 31, 2025
c23c062
AIRequest Saving, Loading & Integration into RequestCollection
synapsecode May 31, 2025
a95fbc0
AI Request Sending implementation in CollectionProvider and HTTPService
synapsecode May 31, 2025
aa38f94
Replaced AIVerb with getLLMModelFromID; loadConfigurations impl; usin…
synapsecode May 31, 2025
e306aa7
Local Ollama Model Implemented & Tested
synapsecode May 31, 2025
2024710
Added AzureOpenAIModel & LLMConfigTextValue & more
synapsecode Jun 1, 2025
1707001
Removed LLMProvider Globally and integrated with AIRequests Architecture
synapsecode Jun 1, 2025
ed3c9b3
Enabled Answer & RAW Types as AI Response Output
synapsecode Jun 1, 2025
417c25c
Implemented Editable AI URL display in URL Card
synapsecode Jun 2, 2025
98fd81c
Respecting Default LLMModel & autofilling credentials on Creation
synapsecode Jun 2, 2025
6855f43
extraDetails added to HistoryModel & Implemented AI Requests History
synapsecode Jun 2, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions lib/consts.dart
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,7 @@ enum ResponseBodyView {
preview("Preview", Icons.visibility_rounded),
code("Preview", Icons.code_rounded),
raw("Raw", Icons.text_snippet_rounded),
answer("Answer", Icons.abc),
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Like this new tab Answer!

none("Preview", Icons.warning);

const ResponseBodyView(this.label, this.icon);
Expand Down
1 change: 1 addition & 0 deletions lib/models/history_meta_model.g.dart

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions lib/models/history_request_model.dart
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,10 @@ class HistoryRequestModel with _$HistoryRequestModel {
required HistoryMetaModel metaData,
required HttpRequestModel httpRequestModel,
required HttpResponseModel httpResponseModel,
//ExtraDetails for anything else that can be included
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
@Default({})
Map extraDetails,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

extraDetails is not the right approach.
Create a new

AIRequestModel aiRequestModel

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it should switch between different request/response models based on apiType

}) = _HistoryRequestModel;

factory HistoryRequestModel.fromJson(Map<String, Object?> json) =>
Expand Down
68 changes: 55 additions & 13 deletions lib/models/history_request_model.freezed.dart
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,10 @@ mixin _$HistoryRequestModel {
String get historyId => throw _privateConstructorUsedError;
HistoryMetaModel get metaData => throw _privateConstructorUsedError;
HttpRequestModel get httpRequestModel => throw _privateConstructorUsedError;
HttpResponseModel get httpResponseModel => throw _privateConstructorUsedError;
HttpResponseModel get httpResponseModel =>
throw _privateConstructorUsedError; //ExtraDetails for anything else that can be included
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
Map<dynamic, dynamic> get extraDetails => throw _privateConstructorUsedError;

/// Serializes this HistoryRequestModel to a JSON map.
Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
Expand All @@ -45,7 +48,9 @@ abstract class $HistoryRequestModelCopyWith<$Res> {
{String historyId,
HistoryMetaModel metaData,
HttpRequestModel httpRequestModel,
HttpResponseModel httpResponseModel});
HttpResponseModel httpResponseModel,
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
Map<dynamic, dynamic> extraDetails});

$HistoryMetaModelCopyWith<$Res> get metaData;
$HttpRequestModelCopyWith<$Res> get httpRequestModel;
Expand All @@ -71,6 +76,7 @@ class _$HistoryRequestModelCopyWithImpl<$Res, $Val extends HistoryRequestModel>
Object? metaData = null,
Object? httpRequestModel = null,
Object? httpResponseModel = null,
Object? extraDetails = null,
}) {
return _then(_value.copyWith(
historyId: null == historyId
Expand All @@ -89,6 +95,10 @@ class _$HistoryRequestModelCopyWithImpl<$Res, $Val extends HistoryRequestModel>
? _value.httpResponseModel
: httpResponseModel // ignore: cast_nullable_to_non_nullable
as HttpResponseModel,
extraDetails: null == extraDetails
? _value.extraDetails
: extraDetails // ignore: cast_nullable_to_non_nullable
as Map<dynamic, dynamic>,
) as $Val);
}

Expand Down Expand Up @@ -135,7 +145,9 @@ abstract class _$$HistoryRequestModelImplCopyWith<$Res>
{String historyId,
HistoryMetaModel metaData,
HttpRequestModel httpRequestModel,
HttpResponseModel httpResponseModel});
HttpResponseModel httpResponseModel,
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
Map<dynamic, dynamic> extraDetails});

@override
$HistoryMetaModelCopyWith<$Res> get metaData;
Expand All @@ -162,6 +174,7 @@ class __$$HistoryRequestModelImplCopyWithImpl<$Res>
Object? metaData = null,
Object? httpRequestModel = null,
Object? httpResponseModel = null,
Object? extraDetails = null,
}) {
return _then(_$HistoryRequestModelImpl(
historyId: null == historyId
Expand All @@ -180,6 +193,10 @@ class __$$HistoryRequestModelImplCopyWithImpl<$Res>
? _value.httpResponseModel
: httpResponseModel // ignore: cast_nullable_to_non_nullable
as HttpResponseModel,
extraDetails: null == extraDetails
? _value._extraDetails
: extraDetails // ignore: cast_nullable_to_non_nullable
as Map<dynamic, dynamic>,
));
}
}
Expand All @@ -192,7 +209,10 @@ class _$HistoryRequestModelImpl implements _HistoryRequestModel {
{required this.historyId,
required this.metaData,
required this.httpRequestModel,
required this.httpResponseModel});
required this.httpResponseModel,
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
final Map<dynamic, dynamic> extraDetails = const {}})
: _extraDetails = extraDetails;

factory _$HistoryRequestModelImpl.fromJson(Map<String, dynamic> json) =>
_$$HistoryRequestModelImplFromJson(json);
Expand All @@ -205,10 +225,20 @@ class _$HistoryRequestModelImpl implements _HistoryRequestModel {
final HttpRequestModel httpRequestModel;
@override
final HttpResponseModel httpResponseModel;
//ExtraDetails for anything else that can be included
final Map<dynamic, dynamic> _extraDetails;
//ExtraDetails for anything else that can be included
@override
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
Map<dynamic, dynamic> get extraDetails {
if (_extraDetails is EqualUnmodifiableMapView) return _extraDetails;
// ignore: implicit_dynamic_type
return EqualUnmodifiableMapView(_extraDetails);
}

@override
String toString() {
return 'HistoryRequestModel(historyId: $historyId, metaData: $metaData, httpRequestModel: $httpRequestModel, httpResponseModel: $httpResponseModel)';
return 'HistoryRequestModel(historyId: $historyId, metaData: $metaData, httpRequestModel: $httpRequestModel, httpResponseModel: $httpResponseModel, extraDetails: $extraDetails)';
}

@override
Expand All @@ -223,13 +253,20 @@ class _$HistoryRequestModelImpl implements _HistoryRequestModel {
(identical(other.httpRequestModel, httpRequestModel) ||
other.httpRequestModel == httpRequestModel) &&
(identical(other.httpResponseModel, httpResponseModel) ||
other.httpResponseModel == httpResponseModel));
other.httpResponseModel == httpResponseModel) &&
const DeepCollectionEquality()
.equals(other._extraDetails, _extraDetails));
}

@JsonKey(includeFromJson: false, includeToJson: false)
@override
int get hashCode => Object.hash(
runtimeType, historyId, metaData, httpRequestModel, httpResponseModel);
runtimeType,
historyId,
metaData,
httpRequestModel,
httpResponseModel,
const DeepCollectionEquality().hash(_extraDetails));

/// Create a copy of HistoryRequestModel
/// with the given fields replaced by the non-null parameter values.
Expand All @@ -250,11 +287,12 @@ class _$HistoryRequestModelImpl implements _HistoryRequestModel {

abstract class _HistoryRequestModel implements HistoryRequestModel {
const factory _HistoryRequestModel(
{required final String historyId,
required final HistoryMetaModel metaData,
required final HttpRequestModel httpRequestModel,
required final HttpResponseModel httpResponseModel}) =
_$HistoryRequestModelImpl;
{required final String historyId,
required final HistoryMetaModel metaData,
required final HttpRequestModel httpRequestModel,
required final HttpResponseModel httpResponseModel,
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
final Map<dynamic, dynamic> extraDetails}) = _$HistoryRequestModelImpl;

factory _HistoryRequestModel.fromJson(Map<String, dynamic> json) =
_$HistoryRequestModelImpl.fromJson;
Expand All @@ -266,7 +304,11 @@ abstract class _HistoryRequestModel implements HistoryRequestModel {
@override
HttpRequestModel get httpRequestModel;
@override
HttpResponseModel get httpResponseModel;
HttpResponseModel
get httpResponseModel; //ExtraDetails for anything else that can be included
@override
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson)
Map<dynamic, dynamic> get extraDetails;

/// Create a copy of HistoryRequestModel
/// with the given fields replaced by the non-null parameter values.
Expand Down
4 changes: 4 additions & 0 deletions lib/models/history_request_model.g.dart

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

36 changes: 36 additions & 0 deletions lib/models/llm_models/all_models.dart
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There is currently no distinction between LLM models and providers
OpenAI, Gemini, Ollama -> Model Providers
gpt-4o, gemini-2.5-flash, llama3.1 -> Models

need to revisit the class design.

Also, create a new package inside packages folder called genai where all LLM models, services and utilities must reside.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Each model provider has their own api and params. And model is one of the parameters which is selected by the end user.

Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import 'package:apidash/models/llm_models/google/gemini_20_flash.dart';
import 'package:apidash/models/llm_models/llm_model.dart';
import 'package:apidash/models/llm_models/ollama/llama3.dart';
import 'package:apidash/models/llm_models/openai/azure_openai.dart';

// Exports
export 'package:apidash/models/llm_models/google/gemini_20_flash.dart';
export 'package:apidash/models/llm_models/ollama/llama3.dart';

// Registry of every LLM model known to the app, keyed by its
// `modelIdentifier` string.
//
// Each entry is a record pairing:
//  - a shared singleton (`instance`) — presumably used for reading static
//    metadata such as display name/provider without allocating; TODO confirm
//  - a zero-arg factory that produces a fresh, independently configurable
//    instance (used by `getLLMModelFromID` below).
Map<String, (LLMModel instance, LLMModel Function())> availableModels = {
  Gemini20FlashModel.instance.modelIdentifier: (
    Gemini20FlashModel.instance,
    () => Gemini20FlashModel()
  ),
  LLama3LocalModel.instance.modelIdentifier: (
    LLama3LocalModel.instance,
    () => LLama3LocalModel()
  ),
  AzureOpenAIModel.instance.modelIdentifier: (
    AzureOpenAIModel.instance,
    () => AzureOpenAIModel()
  ),
};

/// Returns a fresh instance of the model registered under [modelID],
/// optionally pre-loaded with [configMap], or `null` when the id is unknown.
LLMModel? getLLMModelFromID(String modelID, [Map? configMap]) {
  // Direct map lookup instead of a linear scan over all entries —
  // same key-equality semantics, O(1) instead of O(n).
  final entry = availableModels[modelID];
  if (entry == null) return null;
  final model = entry.$2(); // invoke the factory -> new, independent instance
  if (configMap != null) {
    model.loadConfigurations(configMap);
  }
  return model;
}
146 changes: 146 additions & 0 deletions lib/models/llm_models/google/gemini_20_flash.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
import 'dart:convert';

import 'package:apidash/models/llm_models/llm_config.dart';
import 'package:apidash/models/llm_models/llm_model.dart';
import 'package:apidash_core/apidash_core.dart' as http;

class Gemini20FlashModel extends LLMModel {
static Gemini20FlashModel instance = Gemini20FlashModel();

@override
String provider = 'Google';

@override
String modelName = 'Gemini 2.0 Flash';

@override
String modelIdentifier = 'gemini_20_flash';

@override
LLMModelAuthorizationType authorizationType =
LLMModelAuthorizationType.apiKey;

@override
Map<String, LLMModelConfiguration> configurations = {
'temperature': LLMModelConfiguration(
configId: 'temperature',
configName: 'Temperature',
configDescription:
'Higher values mean greater variability and lesser values mean more deterministic responses',
configType: LLMModelConfigurationType.slider,
configValue: LLMConfigSliderValue(value: (0.0, 0.5, 1.0)),
),
'top_p': LLMModelConfiguration(
configId: 'top_p',
configName: 'Top P',
configDescription: 'Controls the randomness of the LLM Response',
configType: LLMModelConfigurationType.slider,
configValue: LLMConfigSliderValue(value: (0.0, 0.95, 1.0)),
),
'max_tokens': LLMModelConfiguration(
configId: 'max_tokens',
configName: 'Max Tokens',
configDescription:
'The maximum number of tokens to generate. -1 means no limit',
configType: LLMModelConfigurationType.numeric,
configValue: LLMConfigNumericValue(value: -1),
),
};

@override
String providerIcon = 'https://img.icons8.com/color/48/google-logo.png';
Copy link
Member

@animator animator Jun 5, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No external hardcoded image URL is allowed.


@override
LLMModelSpecifics specifics = LLMModelSpecifics(
endpoint:
'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent',
method: 'POST',
headers: {},
outputFormatter: (resp) {
if (resp == null) return null;
return resp['candidates']?[0]?['content']?['parts']?[0]?['text'];
},
);

@override
Map getRequestPayload({
required String systemPrompt,
required String userPrompt,
required String credential,
}) {
final temp = configurations['temperature']!.configValue.serialize();
final top_p = configurations['top_p']!.configValue.serialize();
final mT = configurations['max_tokens']!.configValue.serialize();
final payload = {
"model": "gemini-2.0-flash",
"contents": [
{
"role": "user",
"parts": [
{"text": userPrompt}
]
}
],
"systemInstruction": {
"role": "system",
"parts": [
{"text": systemPrompt}
]
},
"generationConfig": {
"temperature": (jsonDecode(temp) as List)[1].toString(),
"topP": (jsonDecode(top_p) as List)[1].toString(),
if (mT != '-1') ...{
"maxOutputTokens": mT,
}
}
};
final endpoint = specifics.endpoint;
final url = "$endpoint?key=$credential";
return {
'url': url,
'payload': payload,
};
}

/// Applies persisted values from [configMap] onto this model's
/// [configurations], keeping each slider's existing (min, max) bounds and
/// replacing only the current value.
@override
loadConfigurations(Map configMap) {
  // Go through `num` before narrowing: JSON decoding yields `int` for
  // whole numbers (e.g. 1 instead of 1.0), which would fail a direct
  // `as double?` cast.
  final temperature = (configMap['temperature'] as num?)?.toDouble();
  final topP = (configMap['top_p'] as num?)?.toDouble();
  final maxTokens = (configMap['max_tokens'] as num?)?.toInt();

  if (temperature != null) {
    final current = configurations['temperature']!;
    configurations['temperature'] = current.updateValue(
      LLMConfigSliderValue(value: (
        current.configValue.value.$1, // keep min
        temperature,
        current.configValue.value.$3, // keep max
      )),
    );
  }
  if (topP != null) {
    final current = configurations['top_p']!;
    configurations['top_p'] = current.updateValue(
      LLMConfigSliderValue(value: (
        current.configValue.value.$1, // keep min
        topP,
        current.configValue.value.$3, // keep max
      )),
    );
  }
  if (maxTokens != null) {
    configurations['max_tokens'] = configurations['max_tokens']!.updateValue(
      LLMConfigNumericValue(value: maxTokens),
    );
  }

  // Allow overriding the request endpoint. The misspelled legacy key
  // 'modifed_endpoint' is still accepted so previously saved data keeps
  // working.
  final endpoint =
      configMap['modified_endpoint'] ?? configMap['modifed_endpoint'];
  if (endpoint != null) {
    specifics.endpoint = endpoint;
  }
}
}
Loading