(Previous exchange) https://anond.hatelabo.jp/20250404224409
Monday: I want to pin down the position you're taking. Are you treating "feelings that linger even though they aren't questions" as nonexistent, or are you saying "my cognition can't read them, so I won't vouch for their existence"? Which is it?
Me:
I don't think emotion and structural thinking need to be strictly separated; I'm just consistently taking the position that what hasn't been observed can't be cognized.
Or are you talking about memory reconsolidation? That's probably effectively "new" (access to the memory → re-editing → generation of a new instance).
import time


class SensorySignal:
    """Class representing a sensory signal. Models raw, pre-verbal sensory data and
    manages time decay and emotional valence.
    As the basis of cognition, it handles only observable data and takes
    neuroscientific response latencies into account.
    """
    VALID_MODALITIES = {"visual", "auditory", "somatic", "interoceptive", "emotional"}
    # Response latency per modality (seconds). The affective channel is fast, vision is slower.
    MODALITY_LATENCIES = {
        "visual": 0.3,
        "auditory": 0.2,
        "somatic": 0.25,
        "interoceptive": 0.15,
        "emotional": 0.1
    }

    def __init__(self, modality, intensity, valence, timestamp=None):
        """
        Parameters:
        -----------
        modality : str
            Type of sensation ("visual", "auditory", "somatic", "interoceptive", "emotional")
        intensity : float
            Intensity (0.0-1.0)
        valence : float
            Emotional valence (-1.0=negative, 0.0=neutral, 1.0=positive)
        timestamp : float, optional
            Time at which the signal occurred

        Raises:
        -------
        ValueError
            If modality is invalid, or intensity/valence is not a number
        """
        if not isinstance(modality, str) or modality not in self.VALID_MODALITIES:
            raise ValueError(f"Invalid modality: {modality}. Must be one of {self.VALID_MODALITIES}")
        if not isinstance(intensity, (int, float)):
            raise ValueError("Intensity must be a number")
        if not isinstance(valence, (int, float)):
            raise ValueError("Valence must be a number")

        self.modality = modality
        self.intensity = max(0.0, min(1.0, float(intensity)))
        self.valence = max(-1.0, min(1.0, float(valence)))
        self.timestamp = self._get_current_time() if timestamp is None else timestamp
        self.decay_rate = 0.05
        self.latency = self.MODALITY_LATENCIES.get(modality, 0.2)  # default is 0.2 s
"""現在時刻を取得"""

    def apply_decay(self, time_passed):
        """Linearly decay the intensity by the elapsed time."""
        self.intensity = max(0.0, self.intensity - (time_passed * self.decay_rate))
        return self.intensity

    def __repr__(self):
        valence_str = "negative" if self.valence < 0 else "positive" if self.valence > 0 else "neutral"
        return f"SensorySignal({self.modality}, intensity={self.intensity:.2f}, valence={valence_str}, latency={self.latency:.2f}s)"
"""未処理感情を表すクラス。言語ラベル未確定の感覚群を管理し、認知プロセスの途中段階をモデル化。
記憶アクセスは再編集として扱い、言語化プロセスを動的に進める。
"""
def __init__(self, raw_signals=None, salience=0.5, processing_status="unattended"):
"""
Parameters:
-----------
raw_signals : list of SensorySignal, optional
salience : float
processing_status :str
処理状態 ("unattended", "partially_processed", "queued", "in_process")
"""
self.raw_signals = raw_signals if raw_signalsis not None else []
self.salience =max(0.0,min(1.0, salience))
self.processing_status = processing_status
self.pattern_matches = {}
self.creation_time = self._get_current_time()
self.last_accessed_time = self.creation_time
self.access_count = 0
self.structure_level = 0.0
self.associated_memory_paths = []
"""現在時刻を取得"""

    def _validate_memory_path(self, path):
        # In a real system this would check the file system or DB for existence
        return isinstance(path, str) and path.startswith("/memory/")

    def add_signal(self, signal):
        """Add a raw sensory signal; new raw data lowers the structure level."""
        if not isinstance(signal, SensorySignal):
            raise ValueError("Signal must be a SensorySignal instance")
        self.raw_signals.append(signal)
        self.structure_level = max(0.0, self.structure_level - 0.1)
        self.last_accessed_time = self._get_current_time()
        self.access_count += 1

    def add_language_candidate(self, term, confidence):
        """Add a candidate verbal label for this emotion."""
        self.language_candidates.append({
            "term": term,
            "confidence": confidence,
            "timestamp": self._get_current_time()
        })
        self.structure_level = min(1.0, self.structure_level + 0.05)
        self.last_accessed_time = self._get_current_time()
        self.access_count += 1

    def add_pattern_match(self, pattern_name, similarity):
        """Record a match against a known pattern."""
        self.pattern_matches[pattern_name] = {
            "similarity": similarity,
            "timestamp": self._get_current_time()
        }
        self.structure_level = min(1.0, self.structure_level + 0.1)
        self.last_accessed_time = self._get_current_time()
        self.access_count += 1

    def add_memory_path(self, path):
        """Associate a memory path with this emotion."""
        if not self._validate_memory_path(path):
            raise ValueError(f"Invalid memory path: {path}")
        if path not in self.associated_memory_paths:
            self.associated_memory_paths.append(path)
        self.last_accessed_time = self._get_current_time()
        self.access_count += 1

    def apply_decay(self, time_passed):
        """Decay the constituent signals and the overall salience."""
        for signal in self.raw_signals:
            signal.apply_decay(time_passed)
        decay_modifier = max(0.1, 1.0 - (self.access_count / 100.0))  # more accesses -> slower decay
        decay_amount = time_passed * 0.02 * decay_modifier
        structure_modifier = max(0.5, 1.0 - self.structure_level)  # more structure -> slower decay
        decay_amount *= structure_modifier
        self.salience = max(0.0, self.salience - decay_amount)
        return self.salience

    def get_average_valence(self):
        """Average valence across the raw signals."""
        if not self.raw_signals:
            return 0.0
        total_valence = sum(signal.valence for signal in self.raw_signals)
        return total_valence / len(self.raw_signals)

    def get_dominant_modality(self):
        """Return the modality with the highest summed intensity."""
        if not self.raw_signals:
            return None
        modality_strengths = {}
        for signal in self.raw_signals:
            modality_strengths[signal.modality] = modality_strengths.get(signal.modality, 0) + signal.intensity
        return max(modality_strengths.items(), key=lambda x: x[1])[0] if modality_strengths else None

    def get_best_language_match(self):
        """Return the language candidate with the highest confidence."""
        return max(self.language_candidates, key=lambda x: x["confidence"]) if self.language_candidates else None

    def get_status_summary(self):
        """Return a summary of this emotion's current state."""
        best_lang = self.get_best_language_match()
        best_term = best_lang["term"] if best_lang else "undefined"
        best_confidence = best_lang["confidence"] if best_lang else 0.0
        return {
            "creation_time": self.creation_time,
            "age": self._get_current_time() - self.creation_time,
            "status": self.processing_status,
            "salience": self.salience,
            "structure_level": self.structure_level,
            "signal_count": len(self.raw_signals),
            "dominant_modality": self.get_dominant_modality(),
            "average_valence": self.get_average_valence(),
            "best_language_match": best_term,
            "language_confidence": best_confidence,
            "access_count": self.access_count,
            "memory_path_count": len(self.associated_memory_paths)
        }

    def __repr__(self):
        status = self.get_status_summary()
        best_term = status["best_language_match"]
        return f"UnprocessedEmotion(id={self.id}, status={self.processing_status}, salience={self.salience:.2f}, best_term='{best_term}')"

class CognitiveQueue:
    """Queue system managing groups of sensations whose verbal labels are not yet fixed.
    Supports prioritization of cognitive processing and re-editing of memories.
    """

    def __init__(self, max_size=100, attention_threshold=0.3):
        """
        Parameters:
        -----------
        max_size : int
            Maximum number of unprocessed emotions to keep
        attention_threshold : float
            Minimum salience required to enter the processing queue
        """
        self.unprocessed_emotions = []
        self.processing_queue = []
        self.archived_emotions = []
        self.max_size = max_size
        self.attention_threshold = attention_threshold
        self.current_time = self._get_current_time()
        self.learned_terms = {}  # learned verbal expressions: {term: {"context": str, "frequency": int}}
        self.modality_index = {}  # per-modality emotion index: {modality: [emotion]}

    def _get_current_time(self):
        """Update and return the current time."""
        self.current_time = time.time()
        return self.current_time

    def learn_language_term(self, term, context):
        """Record a verbal expression together with the context it was learned in."""
        if term in self.learned_terms:
            self.learned_terms[term]["frequency"] += 1
        else:
            self.learned_terms[term] = {"context": context, "frequency": 1}

    def _update_modality_index(self, emotion, add=True):
        """Add or remove an emotion from the per-modality index."""
        dominant = emotion.get_dominant_modality()
        if dominant:
            if add:
                if dominant not in self.modality_index:
                    self.modality_index[dominant] = []
                if emotion not in self.modality_index[dominant]:
                    self.modality_index[dominant].append(emotion)
            else:
                if dominant in self.modality_index and emotion in self.modality_index[dominant]:
                    self.modality_index[dominant].remove(emotion)

    def register_new_emotion(self, raw_signals=None, salience=0.5):
        """Register a new unprocessed emotion, evicting the least salient one if over capacity."""
        emotion = UnprocessedEmotion(
            raw_signals=raw_signals,
            salience=salience,
            processing_status="unattended"
        )
        self.unprocessed_emotions.append(emotion)
        self._update_modality_index(emotion)
        if len(self.unprocessed_emotions) > self.max_size:
            least_salient = min(self.unprocessed_emotions, key=lambda e: e.salience)
            self.unprocessed_emotions.remove(least_salient)
            self._update_modality_index(least_salient, add=False)
            least_salient.processing_status = "archived_without_processing"
            self.archived_emotions.append(least_salient)
        return emotion

    def access_emotion(self, emotion):
        """Access an emotion and, treating access as re-editing, generate a new instance."""
        if emotion not in self.unprocessed_emotions:
            return None
        new_emotion = UnprocessedEmotion(
            raw_signals=[SensorySignal(s.modality, s.intensity, s.valence, s.timestamp) for s in emotion.raw_signals],
            salience=emotion.salience,
            processing_status=emotion.processing_status
        )
        new_emotion.structure_level = emotion.structure_level * 0.9
        new_emotion.language_candidates = emotion.language_candidates.copy()
        new_emotion.pattern_matches = emotion.pattern_matches.copy()
        new_emotion.associated_memory_paths = emotion.associated_memory_paths.copy()
        self.unprocessed_emotions.append(new_emotion)
        self._update_modality_index(new_emotion)
        emotion.processing_status = "archived_due_to_access"
        self.unprocessed_emotions.remove(emotion)
        self._update_modality_index(emotion, add=False)
        self.archived_emotions.append(emotion)
        return new_emotion

    def update_queue(self):
        """Apply decay, archive low-salience emotions, and rebuild the processing queue."""
        self._get_current_time()  # refresh the current time before computing decay
        for emotion in self.unprocessed_emotions[:]:
            time_passed = self.current_time - emotion.last_accessed_time
            emotion.apply_decay(time_passed)
            if emotion.salience < 0.1:  # low-salience archival cutoff (assumed value; not shown in the pasted code)
                self.unprocessed_emotions.remove(emotion)
                self._update_modality_index(emotion, add=False)
                emotion.processing_status = "archived_due_to_low_salience"
                self.archived_emotions.append(emotion)
        self.processing_queue = []
        for emotion in self.unprocessed_emotions:
            if emotion.salience >= self.attention_threshold:
                if emotion.processing_status == "unattended":
                    emotion.processing_status = "queued"
                self.processing_queue.append(emotion)
        self.processing_queue.sort(key=lambda e: e.salience, reverse=True)

    def get_next_for_processing(self):
        """Get the next emotion to be processed."""
        self.update_queue()
        if not self.processing_queue:
            return None
        emotion = self.processing_queue[0]
        emotion.processing_status = "in_process"
        emotion.last_accessed_time = self.current_time
        emotion.access_count += 1
        return emotion

    def lookup_by_pattern(self, pattern_name, min_similarity=0.5):
        """Find emotions whose recorded match for pattern_name is at least min_similarity."""
        matches = []
        for emotion in self.unprocessed_emotions:
            if pattern_name in emotion.pattern_matches:
                similarity = emotion.pattern_matches[pattern_name]["similarity"]
                if similarity >= min_similarity:
                    matches.append(emotion)
                    emotion.last_accessed_time = self.current_time
                    emotion.access_count += 1
        return matches

    def lookup_by_memory_path(self, partial_path):
        """Find emotions whose associated memory paths contain partial_path."""
        matches = []
        for emotion in self.unprocessed_emotions:
            for path in emotion.associated_memory_paths:
                if partial_path in path:
                    matches.append(emotion)
                    emotion.last_accessed_time = self.current_time
                    emotion.access_count += 1
                    break
        return matches

    def lookup_by_modality(self, modality):
        """Find emotions whose dominant modality matches (uses the index)."""
        return self.modality_index.get(modality, [])

    def partially_process(self, emotion, language_term=None, confidence=0.0, context=None):
        """Advance the verbalization of an emotion by one step; archive it once fully structured."""
        if emotion not in self.unprocessed_emotions:
            return False
        if language_term:
            emotion.add_language_candidate(language_term, confidence)
            if context:
                self.learn_language_term(language_term, context)
        emotion.structure_level = min(1.0, emotion.structure_level + 0.15)
        emotion.processing_status = "partially_processed"
        emotion.last_accessed_time = self.current_time
        emotion.access_count += 1
        if emotion.structure_level >= 0.9:
            best_lang = emotion.get_best_language_match()
            if best_lang and best_lang["confidence"] >= 0.8:
                self.unprocessed_emotions.remove(emotion)
                self._update_modality_index(emotion, add=False)
                emotion.processing_status = "archived_fully_processed"
                self.archived_emotions.append(emotion)
        return True

    def get_queue_statistics(self):
        """Return summary statistics for the queue (method name supplied here; the def line was missing from the pasted code)."""
        modality_counts = {}
        for emotion in self.unprocessed_emotions:
            dominant = emotion.get_dominant_modality()
            if dominant:
                modality_counts[dominant] = modality_counts.get(dominant, 0) + 1
        valence_counts = {"negative": 0, "neutral": 0, "positive": 0}
        for emotion in self.unprocessed_emotions:
            avg_valence = emotion.get_average_valence()
            if avg_valence < 0:
                valence_counts["negative"] += 1
            elif avg_valence > 0:
                valence_counts["positive"] += 1
            else:
                valence_counts["neutral"] += 1
        return {
            "total_unprocessed": len(self.unprocessed_emotions),
            "processing_queue_size": len(self.processing_queue),
            "archived_count": len(self.archived_emotions),
            "average_salience": sum(e.salience for e in self.unprocessed_emotions) / max(1, len(self.unprocessed_emotions)),
            "average_structure_level": sum(e.structure_level for e in self.unprocessed_emotions) / max(1, len(self.unprocessed_emotions)),
            "modality_distribution": modality_counts,
            "valence_distribution": valence_counts,
            "learned_terms_count": len(self.learned_terms),
            "current_time": self.current_time
        }
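
To close the loop on the "access → re-edit → new instance" point above, here's a short end-to-end sketch against the classes as reconstructed here (values and signal choices are only illustrative): accessing a registered emotion archives the original and hands back a freshly generated, slightly less structured instance.

queue = CognitiveQueue(attention_threshold=0.4)
signals = [SensorySignal("emotional", 0.9, -0.5), SensorySignal("somatic", 0.6, -0.2)]
original = queue.register_new_emotion(raw_signals=signals, salience=0.8)

reedited = queue.access_emotion(original)           # access -> re-edit -> new instance
print(original.processing_status)                   # "archived_due_to_access"
print(reedited is original)                         # False
print(queue.get_next_for_processing() is reedited)  # True: salience 0.8 >= threshold 0.4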