-
Notifications
You must be signed in to change notification settings - Fork 559
APIDash AI Request Feature Implementation #850
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
d9eab36
8a76d53
4238ae0
459fe47
932e738
b38006e
5931d47
c23c062
a95fbc0
aa38f94
e306aa7
2024710
1707001
ed3c9b3
417c25c
98fd81c
6855f43
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,6 +16,10 @@ class HistoryRequestModel with _$HistoryRequestModel { | |
required HistoryMetaModel metaData, | ||
required HttpRequestModel httpRequestModel, | ||
required HttpResponseModel httpResponseModel, | ||
//ExtraDetails for anything else that can be included | ||
@JsonKey(fromJson: customMapFromJson, toJson: customMapToJson) | ||
@Default({}) | ||
Map extraDetails, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. extraDetails is not the right approach.
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. it should switch between different request/response models based on apiType |
||
}) = _HistoryRequestModel; | ||
|
||
factory HistoryRequestModel.fromJson(Map<String, Object?> json) => | ||
|
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. There is currently no distinction between LLM models and providers need to revisit the class design. Also, create a new package inside packages folder called genai where all LLM models, services and utilities must reside. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Each model provider has their own api and params. And model is one of the parameters which is selected by the end user. |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,36 @@ | ||
import 'package:apidash/models/llm_models/google/gemini_20_flash.dart'; | ||
import 'package:apidash/models/llm_models/llm_model.dart'; | ||
import 'package:apidash/models/llm_models/ollama/llama3.dart'; | ||
import 'package:apidash/models/llm_models/openai/azure_openai.dart'; | ||
|
||
// Exports | ||
export 'package:apidash/models/llm_models/google/gemini_20_flash.dart'; | ||
export 'package:apidash/models/llm_models/ollama/llama3.dart'; | ||
|
||
// Registry of every LLM model available in the app, keyed by its unique
// model identifier (e.g. 'gemini_20_flash').
//
// Each entry pairs:
//   - a shared singleton instance, used for display/metadata lookups, and
//   - a factory closure that creates a fresh, independently configurable
//     instance (so per-request configuration does not mutate the singleton).
Map<String, (LLMModel instance, LLMModel Function())> availableModels = {
  Gemini20FlashModel.instance.modelIdentifier: (
    Gemini20FlashModel.instance,
    () => Gemini20FlashModel()
  ),
  LLama3LocalModel.instance.modelIdentifier: (
    LLama3LocalModel.instance,
    () => LLama3LocalModel()
  ),
  AzureOpenAIModel.instance.modelIdentifier: (
    AzureOpenAIModel.instance,
    () => AzureOpenAIModel()
  ),
};
|
||
/// Returns a fresh [LLMModel] instance for [modelID], or `null` if the
/// identifier is not registered in [availableModels].
///
/// When [configMap] is provided, the new instance's configurations are
/// loaded from it before it is returned.
LLMModel? getLLMModelFromID(String modelID, [Map? configMap]) {
  // Direct O(1) map lookup instead of linearly scanning every entry.
  final entry = availableModels[modelID];
  if (entry == null) return null;
  // $2 is the factory closure; always build a fresh instance so that
  // loading configurations does not mutate the shared singleton ($1).
  final model = entry.$2();
  if (configMap != null) {
    model.loadConfigurations(configMap);
  }
  return model;
}
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,146 @@ | ||
import 'dart:convert'; | ||
|
||
import 'package:apidash/models/llm_models/llm_config.dart'; | ||
import 'package:apidash/models/llm_models/llm_model.dart'; | ||
import 'package:apidash_core/apidash_core.dart' as http; | ||
|
||
/// Gemini 2.0 Flash model definition for the Google `generateContent` API.
///
/// Exposes user-tunable generation parameters (temperature, top-p,
/// max tokens) and knows how to build request payloads for, and extract
/// text from, the Gemini REST endpoint.
class Gemini20FlashModel extends LLMModel {
  /// Shared instance used by the model registry for display/metadata.
  static Gemini20FlashModel instance = Gemini20FlashModel();

  @override
  String provider = 'Google';

  @override
  String modelName = 'Gemini 2.0 Flash';

  @override
  String modelIdentifier = 'gemini_20_flash';

  @override
  LLMModelAuthorizationType authorizationType =
      LLMModelAuthorizationType.apiKey;

  /// User-tunable generation parameters surfaced in the UI.
  @override
  Map<String, LLMModelConfiguration> configurations = {
    'temperature': LLMModelConfiguration(
      configId: 'temperature',
      configName: 'Temperature',
      configDescription:
          'Higher values mean greater variability and lesser values mean more deterministic responses',
      configType: LLMModelConfigurationType.slider,
      // Slider value record is (min, current, max).
      configValue: LLMConfigSliderValue(value: (0.0, 0.5, 1.0)),
    ),
    'top_p': LLMModelConfiguration(
      configId: 'top_p',
      configName: 'Top P',
      configDescription: 'Controls the randomness of the LLM Response',
      configType: LLMModelConfigurationType.slider,
      configValue: LLMConfigSliderValue(value: (0.0, 0.95, 1.0)),
    ),
    'max_tokens': LLMModelConfiguration(
      configId: 'max_tokens',
      configName: 'Max Tokens',
      configDescription:
          'The maximum number of tokens to generate. -1 means no limit',
      configType: LLMModelConfigurationType.numeric,
      configValue: LLMConfigNumericValue(value: -1),
    ),
  };

  // TODO(review): replace this external hardcoded image URL with a bundled
  // asset — third-party image URLs are not allowed per review feedback.
  @override
  String providerIcon = 'https://img.icons8.com/color/48/google-logo.png';

  @override
  LLMModelSpecifics specifics = LLMModelSpecifics(
    endpoint:
        'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent',
    method: 'POST',
    headers: {},
    // Extracts the first candidate's text from a generateContent response;
    // returns null when the response (or any intermediate field) is absent.
    outputFormatter: (resp) {
      if (resp == null) return null;
      return resp['candidates']?[0]?['content']?['parts']?[0]?['text'];
    },
  );

  /// Builds the request for the Gemini `generateContent` endpoint.
  ///
  /// Returns a map with:
  ///   - 'url': the endpoint with the API key appended as a query parameter
  ///     (the Gemini REST API authenticates via `?key=`), and
  ///   - 'payload': the JSON request body.
  @override
  Map getRequestPayload({
    required String systemPrompt,
    required String userPrompt,
    required String credential,
  }) {
    // Slider configs serialize to a JSON list string: [min, current, max];
    // decode and take the current value (index 1).
    final temperature =
        (jsonDecode(configurations['temperature']!.configValue.serialize())
            as List)[1];
    final topP =
        (jsonDecode(configurations['top_p']!.configValue.serialize())
            as List)[1];
    // Numeric configs serialize to the bare number; -1 means "no limit".
    final maxTokens =
        num.tryParse(configurations['max_tokens']!.configValue.serialize());

    final payload = {
      "model": "gemini-2.0-flash",
      "contents": [
        {
          "role": "user",
          "parts": [
            {"text": userPrompt}
          ]
        }
      ],
      "systemInstruction": {
        "role": "system",
        "parts": [
          {"text": systemPrompt}
        ]
      },
      "generationConfig": {
        // The Gemini API expects numeric values for these fields; the
        // previous code stringified them, which the endpoint can reject.
        "temperature": temperature,
        "topP": topP,
        if (maxTokens != null && maxTokens != -1)
          "maxOutputTokens": maxTokens,
      }
    };
    return {
      'url': '${specifics.endpoint}?key=$credential',
      'payload': payload,
    };
  }

  /// Applies user-provided overrides from [configMap].
  ///
  /// Recognized keys: `temperature` (double), `top_p` (double),
  /// `max_tokens` (int), and `modified_endpoint` (String). The legacy
  /// misspelled key `modifed_endpoint` is still accepted for backward
  /// compatibility.
  @override
  loadConfigurations(Map configMap) {
    final double? temperature = configMap['temperature'];
    final double? topP = configMap['top_p'];
    final int? maxTokens = configMap['max_tokens'];

    if (temperature != null) {
      final current = configurations['temperature']!;
      // Preserve the slider's (min, _, max) bounds; replace only the value.
      configurations['temperature'] = current.updateValue(
        LLMConfigSliderValue(value: (
          current.configValue.value.$1,
          temperature,
          current.configValue.value.$3
        )),
      );
    }
    if (topP != null) {
      final current = configurations['top_p']!;
      configurations['top_p'] = current.updateValue(
        LLMConfigSliderValue(value: (
          current.configValue.value.$1,
          topP,
          current.configValue.value.$3
        )),
      );
    }
    if (maxTokens != null) {
      configurations['max_tokens'] =
          configurations['max_tokens']!.updateValue(
        LLMConfigNumericValue(value: maxTokens),
      );
    }

    // Allow overriding the endpoint; accept the corrected key first and
    // fall back to the original misspelling so existing callers keep working.
    final endpointOverride =
        configMap['modified_endpoint'] ?? configMap['modifed_endpoint'];
    if (endpointOverride != null) {
      specifics.endpoint = endpointOverride;
    }
  }
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Like this new tab Answer!