/* global React, ReactDOM */
// app.jsx — MyMusic Studio v3, wired to MM_BACKEND.

const { useState, useEffect, useRef } = React;

// Default values for the Tweaks panel (hue of the accent color, layout
// density, timeline visibility). NOTE(review): the EDITMODE-BEGIN/END markers
// presumably delimit a machine-editable region — keep them intact; confirm
// against the tooling that rewrites this file.
const TWEAK_DEFAULTS = /*EDITMODE-BEGIN*/{
  "accentHue": 35,
  "denseLayout": false,
  "showTimeline": true
}/*EDITMODE-END*/;

function App() {
  // Live tweak values (`t`) plus a single-key setter from the Tweaks panel.
  // NOTE(review): `useTweaks` is defined elsewhere in the project — presumably
  // it persists values; confirm against its implementation.
  const [t, setTweak] = useTweaks(TWEAK_DEFAULTS);
  // Static app data injected globally (section types, ragas, presets, …).
  const D = window.MM_DATA;

  // Lyrics
  const [lyrics, setLyrics] = useState('');

  // Structure
  const [title, setTitle] = useState('');
  const [genres, setGenres] = useState([]);
  const [sections, setSections] = useState([]);
  const [selected, setSelected] = useState(null);
  const [structuring, setStructuring] = useState(false);

  // Style mode: 'auto' | 'reference' | anything else is treated as manual.
  const [mode, setMode] = useState('auto');

  // Auto style (LLM-derived from the lyrics)
  const [autoStyle, setAutoStyle] = useState(null);
  const [autoGenerating, setAutoGenerating] = useState(false);

  // Reference track (uploaded file or YouTube URL to analyse)
  const [refFile, setRefFile] = useState(null);
  const [ytUrl, setYtUrl] = useState('');
  const [analysis, setAnalysis] = useState(null);
  const [analysing, setAnalysing] = useState(false);

  // Manual style — has two sub-modes: free-form text, or structured parameters.
  const [manual, setManual] = useState({
    subMode: 'parameters', // 'parameters' | 'text'
    text: '',              // free-form instruction (used when subMode === 'text')
    raga: 'Bhimpalasi',
    taal: 'Kaharva',
    vaab: 'Shringar',
    voice: 'Female alto, breathy, ghazal-leaning',
    instruments: ['tabla', 'harmonium', 'sarangi', 'tanpura'],
  });
  const [bpm, setBpm] = useState(92);

  // Modify (refinement applied to the current style at Generate time)
  const [modify, setModify] = useState({ text: '', response: '' });
  const [modifying, setModifying] = useState(false);

  // Generation / playback
  const [generating, setGenerating] = useState(false);
  const [genStep, setGenStep] = useState(0);   // overlay progress step (0–6)
  const [track, setTrack] = useState(null);    // metadata of the last generated track
  const [progress, setProgress] = useState(0); // playback position, 0..1
  const [playing, setPlaying] = useState(false);
  const [error, setError] = useState(null);    // dismissible global error banner text

  // API key
  const [apiKey, setApiKeyState] = useState('');
  const [keyReady, setKeyReady] = useState(false); // initial key probe finished
  const [hasKey, setHasKey] = useState(false);     // some usable key path exists

  // Eavesdropper overlay
  const [eavesdropOpen, setEavesdropOpen] = useState(false);

  // Audio controller (real playback)
  const audioCtrlRef = useRef(null); // live controller from MM_BACKEND.audio.play
  const audioBlobRef = useRef(null); // generated audio blob (kept for download)
  const audioUrlRef = useRef(null);  // blob object-URL (must be revoked when replaced)

  // current taal for tempo preview
  // Precedence: the active mode's analysed taal wins, else the manual choice.
  const currentTaal =
    mode === 'auto' && autoStyle?.taal ? autoStyle.taal :
    mode === 'reference' && analysis?.taal ? analysis.taal :
    manual.taal;

  // Load API key on mount + probe whether the server-side proxy is reachable.
  useEffect(() => {
    let cancelled = false; // guards state writes after unmount
    (async () => {
      try {
        const k = await window.MM_BACKEND.key.get();
        if (!cancelled) setApiKeyState(k || '');
        // hasKey is true if we have any usable path: local key OR server proxy.
        const ok = await window.MM_BACKEND.key.has();
        if (!cancelled) setHasKey(ok);
      } finally {
        if (!cancelled) setKeyReady(true);
      }
    })();
    return () => { cancelled = true; };
  }, []);

  // Apply tweaks: push the accent hue into CSS custom properties and toggle
  // the dense-layout body class whenever any tweak value changes.
  useEffect(() => {
    const hue = t.accentHue ?? 35;
    document.documentElement.style.setProperty('--amber', `oklch(0.76 0.13 ${hue})`);
    document.documentElement.style.setProperty('--amber-2', `oklch(0.85 0.10 ${hue})`);
    document.documentElement.style.setProperty('--amber-deep', `oklch(0.55 0.12 ${hue})`);
    document.body.classList.toggle('dense', !!t.denseLayout);
  }, [t]);

  // Tear down audio on unmount. (The effect body itself returns the cleanup
  // function — nothing runs on mount.)
  useEffect(() => () => {
    audioCtrlRef.current?.stop?.();
    if (audioUrlRef.current) {
      try { URL.revokeObjectURL(audioUrlRef.current); } catch {}
    }
  }, []);

  // ── Helpers ─────────────────────────────────────────────────────

  // Returns true when some key path is usable; otherwise surfaces a global
  // error and returns false. A local key (dev mode) or a reachable
  // server-side proxy both count as usable.
  async function ensureKeyOrError() {
    if (await window.MM_BACKEND.key.has()) return true;
    setError('No server-side key (deploy /api/generate or set GEMINI_API_KEY) and no local key (paste one in Tweaks → Gemini → API key).');
    return false;
  }

  // Serialise the current title/genres/sections into the <song> XML snippet
  // the composer prompt consumes. Section text is escaped; empty sections
  // become an "(instrumental)" comment.
  function buildStructureXml() {
    // Minimal escaping for text content (the only attribute, duration, is numeric).
    const esc = (s = '') => String(s).replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');

    const xml = ['<song>', `  <title>${esc(title || 'Untitled')}</title>`];
    if (genres.length) xml.push(`  <genres>${genres.map(esc).join(', ')}</genres>`);

    xml.push('  <structure>');
    for (const s of sections) {
      xml.push(`    <${s.type} duration="${s.dur}s">`);
      const body = (s.lines || '').split('\n').map((l) => l.trim()).filter(Boolean);
      if (body.length) {
        body.forEach((l) => xml.push(`      <line>${esc(l)}</line>`));
      } else {
        xml.push(`      <!-- (instrumental) -->`);
      }
      const ctx = (s.context || '').trim();
      if (ctx) xml.push(`      <context>${esc(ctx)}</context>`);
      xml.push(`    </${s.type}>`);
    }
    xml.push('  </structure>', '</song>');

    return xml.join('\n');
  }

  // Normalise whichever style source is currently active (auto, reference,
  // or manual) into one common shape consumed by doModify/doGenerate.
  // `userText` is only non-empty in manual free-text sub-mode, where it is
  // the authoritative creative direction.
  function activeStyleBundle() {
    // Auto style and reference analysis share the same parsed shape.
    const fromParsed = (p) => ({
      summary: `${p.genre || ''}${p.genreNote ? ' — ' + p.genreNote : ''} · ${p.taal} · ${p.bpm} bpm · ${(p.voice || '').split(',')[0]}`,
      userText: '',
      raga: p.raga,
      taal: p.taal,
      bpm: p.bpm,
      vocal: p.voice,
      instruments: p.instruments || [],
      vaab: p.vaab,
    });

    if (mode === 'auto' && autoStyle) return fromParsed(autoStyle);
    if (mode === 'reference' && analysis) return fromParsed(analysis);

    // Manual mode — free-form text sub-mode.
    if (manual.subMode === 'text') {
      const txt = (manual.text || '').trim();
      return {
        summary: txt
          ? `Manual text: ${txt.slice(0, 80)}${txt.length > 80 ? '…' : ''}`
          : `Manual (no instruction yet) · ${bpm} bpm`,
        userText: txt,
        // Keep structured fields populated as gentle defaults so the Lyria
        // prompt doesn't end up with empty fields, but the user's text is
        // the authoritative direction.
        raga: manual.raga,
        taal: manual.taal,
        bpm,
        vocal: manual.voice,
        instruments: manual.instruments,
        vaab: manual.vaab,
      };
    }

    // Manual mode — structured parameters sub-mode.
    return {
      summary: `${manual.raga} · ${manual.taal} · ${bpm} bpm · ${manual.voice.split(',')[0]}`,
      userText: '',
      raga: manual.raga,
      taal: manual.taal,
      bpm,
      vocal: manual.voice,
      instruments: manual.instruments,
      vaab: manual.vaab,
    };
  }

  // ── Actions ─────────────────────────────────────────────────────

  // Ask the LLM to extract title/genres/sections from the raw lyrics and
  // hydrate the structure editor. Unknown section types fall back to 'verse';
  // durations are clamped to at least 2 s, defaulting to 15 s.
  const doStructure = async () => {
    if (!lyrics.trim() || !(await ensureKeyOrError())) return;
    setStructuring(true);
    setError(null);
    try {
      const parsed = await window.MM_BACKEND.runPrompt('structure', {
        lyrics,
        genre_hint: genres.join(', '),
      });

      if (parsed?.title) setTitle(parsed.title);
      if (parsed?.genres) setGenres(parsed.genres);
      if (parsed?.sections) {
        setSections(parsed.sections.map((s) => ({
          id: 's' + Math.random().toString(36).slice(2, 8),
          type: D.SECTION_TYPES.find((x) => x.id === s.type) ? s.type : 'verse',
          lines: s.lines || '',
          context: '',
          vaab: '',
          // Radix 10: LLM durations are decimal strings; without the radix a
          // stray "0x…" prefix would silently parse as hex.
          dur: Math.max(2, Number.parseInt(s.dur, 10) || 15),
        })));
      }
    } catch (e) {
      console.error(e);
      setError(e.message || 'Could not extract structure.');
    } finally {
      // Clear the busy flag even if the error handler itself throws.
      setStructuring(false);
    }
  };

  // Derive a full style (raga/taal/bpm/voice/instruments) from the lyrics.
  // On failure, fall back to canned analysis so the UI stays usable.
  const doAutoGenerate = async () => {
    if (!lyrics.trim() || !(await ensureKeyOrError())) return;
    setAutoGenerating(true);
    setError(null);
    try {
      const parsed = await window.MM_BACKEND.runPrompt('auto_style', { lyrics });
      setAutoStyle(seedSelected(parsed));
      // Mirror the suggested tempo into the shared bpm control.
      if (parsed?.bpm) setBpm(parsed.bpm);
    } catch (e) {
      console.error(e);
      setError(e.message || 'Could not generate style.');
      setAutoStyle(seedSelected(D.FALLBACK_ANALYSIS));
    } finally {
      // Clear the busy flag even if the error handler itself throws.
      setAutoGenerating(false);
    }
  };

  // Ask the LLM to translate the user's modify request into a concrete
  // arrangement refinement; the reply is stored and applied at Generate time.
  const doModify = async () => {
    const text = (modify.text || '').trim();
    if (!text || !(await ensureKeyOrError())) return;
    setModifying(true);
    setError(null);
    try {
      const style = activeStyleBundle();
      const reply = await window.MM_BACKEND.runPrompt('modify', {
        current_style: style.summary,
        user_request: text,
      });
      setModify((m) => ({ ...m, response: String(reply || '').trim() }));
    } catch (e) {
      console.error(e);
      setError(e.message || 'Could not get refinement.');
    } finally {
      // Clear the busy flag even if the error handler itself throws.
      setModifying(false);
    }
  };

  // Drop both the pending request text and any stored refinement.
  const clearModify = () => setModify({ text: '', response: '' });

  // Analyse a reference track — an uploaded audio file (sent inline as
  // base64) or a YouTube URL (URL text only) — into a style analysis.
  const doAnalyse = async () => {
    if ((!refFile && !ytUrl.trim()) || !(await ensureKeyOrError())) return;
    setAnalysing(true);
    setError(null);
    try {
      const file = refFile?.file || null;

      // Build inline audio part if a real File was uploaded.
      let parts = null;
      if (file) {
        const MAX = 19 * 1024 * 1024; // Gemini inline limit ~20 MB total
        if (file.size > MAX) {
          throw new Error(`File too large for inline analysis (${Math.round(file.size/1024/1024)} MB > 19 MB).`);
        }
        const data = await fileToBase64(file);
        const mimeType = normalizeAudioMime(file);
        parts = [{ inlineData: { mimeType, data } }];
      }

      const source = file
        ? `Uploaded audio file: "${refFile.name}" (${refFile.size} KB, ${file.type || 'unknown'}). The audio bytes are attached as inlineData; analyse the actual sound.`
        : `YouTube URL: ${ytUrl} (no audio bytes attached — infer from URL hints).`;

      const parsed = await window.MM_BACKEND.runPrompt(
        'reference_analysis',
        { source },
        parts ? { parts } : undefined
      );
      setAnalysis(seedSelected(parsed));
      // Mirror the detected tempo into the shared bpm control.
      if (parsed?.bpm) setBpm(parsed.bpm);
    } catch (e) {
      console.error(e);
      setError(e.message || 'Could not analyse the reference.');
      // Fall back to canned analysis so the UI stays usable.
      setAnalysis(seedSelected(D.FALLBACK_ANALYSIS));
    } finally {
      // Clear the busy flag even if the error handler itself throws.
      setAnalysing(false);
    }
  };

  // Pace the "Composing…" overlay while Lyria works.
  function startStepPacer() {
    let step = 0;
    setGenStep(0);
    const intervalMs = 2400;
    const timer = setInterval(() => {
      step = Math.min(step + 1, 4);
      setGenStep(step);
    }, intervalMs);
    return {
      stop() { clearInterval(timer); },
      async finish() {
        clearInterval(timer);
        for (let i = Math.max(step, 4); i <= 6; i++) {
          setGenStep(i);
          await new Promise((r) => setTimeout(r, 180));
        }
      },
    };
  }

  // Compose the track via MM_BACKEND.composeMusic, then auto-play the result.
  // Requires either raw lyrics or at least one structured section.
  const doGenerate = async () => {
    if (!lyrics.trim() && sections.length === 0) return;
    if (!(await ensureKeyOrError())) return;
    setError(null);

    // Stop any current playback / drop old blob URL
    audioCtrlRef.current?.stop?.();
    audioCtrlRef.current = null;
    if (audioUrlRef.current) {
      try { URL.revokeObjectURL(audioUrlRef.current); } catch {}
      audioUrlRef.current = null;
    }
    setPlaying(false);
    setProgress(0);

    setGenerating(true);
    const pacer = startStepPacer();

    try {
      const style = activeStyleBundle();
      // Target duration: sum of section durations, defaulting to 90 s.
      const totalSec = sections.reduce((a, s) => a + s.dur, 0) || 90;
      const xml = buildStructureXml();

      // Fold the manual free-text direction (if any) and the stored modify
      // refinement (if any) into one instruction block for the composer.
      const userInstruction = [
        style.userText
          ? `The user has supplied this free-form instruction. Treat it as the primary creative direction and let it override any conflicting guidance from the structured fields below:\n"${style.userText}"`
          : '',
        (modify.response || '').trim()
          ? `Apply this refinement to the arrangement (translated from the user's modify request): ${modify.response.trim()}`
          : '',
      ].filter(Boolean).join('\n\n');

      const result = await window.MM_BACKEND.composeMusic({
        // Fallback title: first lyrics line, truncated to 60 chars.
        title: title || (lyrics.split('\n')[0] || 'Untitled').slice(0, 60),
        user_instruction: userInstruction,
        style_summary: style.summary,
        raga: style.raga || 'Bhimpalasi',
        taal: style.taal || 'Kaharva',
        bpm: style.bpm || bpm || 92,
        vocal: style.vocal || 'Female alto, breathy',
        instruments: style.instruments,
        vaab: style.vaab || 'Shringar',
        duration_sec: totalSec,
        structure_xml: xml,
        lyrics,
      });

      // Fast-forward the overlay through its final steps before revealing.
      await pacer.finish();

      audioBlobRef.current = result.blob;
      audioUrlRef.current = result.url;

      setTrack({
        title: title || (lyrics.split('\n')[0] || 'Untitled').slice(0, 60),
        sub: style.summary,
        length: Math.round(result.durationSec || totalSec),
        waveform: result.waveform || window.genWaveform(Math.floor(Math.random() * 1e9), 200),
        audioUrl: result.url,
        mimeType: result.mimeType,
      });

      // Auto-play the freshly generated track.
      const ctrl = window.MM_BACKEND.audio.play(result.url, {
        onProgress: (p) => setProgress(p),
        onEnd: () => { setPlaying(false); setProgress(1); },
        onError: (err) => { console.error('audio error', err); setPlaying(false); },
      });
      audioCtrlRef.current = ctrl;
      setPlaying(true);
    } catch (e) {
      pacer.stop();
      console.error(e);
      setError(e.message || 'Generation failed.');
    } finally {
      setGenerating(false);
      setGenStep(0);
    }
  };

  // Toggle pause/resume of the current track. A finished track (progress at
  // the end) restarts from the top.
  const onTogglePlay = () => {
    const ctrl = audioCtrlRef.current;
    // Nothing to toggle without both a track and a live controller (e.g.
    // stale state after a page reload leaves `track` set with no audio).
    if (!track || !ctrl) return;

    if (playing) {
      ctrl.pause();
      setPlaying(false);
      return;
    }

    // Rewind before resuming when the previous playback ran to completion.
    if (progress >= 1) ctrl.seek(0);
    ctrl.resume();
    setPlaying(true);
  };

  // Scrub: update the UI position immediately and seek real audio if present.
  const onSeek = (p) => {
    setProgress(p);
    audioCtrlRef.current?.seek?.(p);
  };

  // Download the generated audio via a transient <a download> click, reusing
  // the existing blob URL (no re-fetch). Filename: sanitised title + the
  // extension matching the audio MIME type.
  const doDownload = () => {
    if (!audioBlobRef.current || !audioUrlRef.current || !track) return;
    const a = document.createElement('a');
    a.href = audioUrlRef.current;
    a.download = `${(track.title || 'song').replace(/[^\w]+/g, '_')}.${audioExtFromMime(track.mimeType)}`;
    document.body.appendChild(a);
    a.click();
    a.remove();
  };

  // Generate is allowed whenever there is any input and no run in flight.
  const canGenerate = !generating && (lyrics.trim().length > 0 || sections.length > 0);

  return (
    <>
      <div className="app">
        <TopBar title={title} bpm={bpm} taal={currentTaal} mode={mode}
                keyReady={keyReady} hasKey={hasKey} />
        {/* Dismissible global error banner */}
        {error && (
          <div className="mm-err">
            <span>{error}</span>
            <button onClick={() => setError(null)} aria-label="Dismiss">×</button>
          </div>
        )}
        {/* Three-column workspace: lyrics → structure → style */}
        <div className="workspace">
          <LyricsInput lyrics={lyrics} setLyrics={setLyrics} examples={D.EXAMPLE_LYRICS} />
          <XMLStructureEditor
            title={title} setTitle={setTitle}
            genres={genres} setGenres={setGenres}
            sections={sections} setSections={setSections}
            selected={selected} setSelected={setSelected}
            lyrics={lyrics}
            structuring={structuring} onStructure={doStructure}
            blockTypes={D.SECTION_TYPES} />
          <StylePanel
            mode={mode} setMode={setMode}
            lyrics={lyrics}
            autoStyle={autoStyle} setAutoStyle={setAutoStyle}
            autoGenerating={autoGenerating} onAutoGenerate={doAutoGenerate}
            refFile={refFile} setRefFile={setRefFile}
            ytUrl={ytUrl} setYtUrl={setYtUrl}
            analysis={analysis} setAnalysis={setAnalysis}
            analysing={analysing} onAnalyse={doAnalyse}
            manual={manual} setManual={setManual}
            ragas={D.RAGAS} taals={D.TAALS} vaabs={D.VAABS}
            instruments={D.INSTRUMENTS} voicePresets={D.VOICE_PRESETS}
            suggestedByRaga={D.SUGGESTED_INSTRUMENTS_BY_RAGA}
            bpm={bpm} setBpm={setBpm}
            currentTaal={currentTaal}
            modify={modify} setModify={setModify}
            modifying={modifying} onModify={doModify} onClearModify={clearModify} />
        </div>
        {/* Player bar doubles as the Generate entry point */}
        <TrackPlayer
          track={track} playing={playing} progress={progress}
          onPlay={onTogglePlay}
          onSeek={onSeek}
          canGenerate={canGenerate}
          onGenerate={doGenerate}
          onDownload={doDownload} />
      </div>

      {/* Full-screen overlays */}
      {generating && <GenerationOverlay step={genStep} />}
      {eavesdropOpen && <EavesdropPanel onClose={() => setEavesdropOpen(false)} />}

      {/* Tweaks panel: API key management, debug tools, accent color */}
      <TweaksPanel title="Tweaks">
        <TweakSection label="Gemini" />
        <TweakText label="API key"
                   value={apiKey}
                   placeholder={keyReady ? 'paste Gemini API key' : 'loading…'}
                   onChange={(v) => {
                     setApiKeyState(v);
                     window.MM_BACKEND.key.set(v);
                   }} />
        <TweakButton label="Clear key"
                     secondary
                     onClick={() => { setApiKeyState(''); window.MM_BACKEND.key.set(''); }} />
        <TweakButton label="View prompt registry"
                     secondary
                     onClick={() => window.open('backend/prompts/index.html', '_blank')} />
        <TweakSection label="Debug" />
        <TweakButton label="Eavesdrop on prompts"
                     onClick={() => setEavesdropOpen(true)} />
        <TweakSection label="Accent" />
        <TweakSlider label="Hue" min={0} max={360} step={5}
                     value={t.accentHue}
                     onChange={(v) => setTweak('accentHue', v)} />
      </TweaksPanel>
    </>
  );
}

// Hydrate the singular selected fields (`genre`, `voice`, `raga`, `taal`,
// `vaab`, `key`, plus their `…Note` twins) from the first item of the
// corresponding alternates array the LLM returns (`genres`, `voices`, …).
// Existing singular values are kept untouched, so the rest of the app can
// read `genre`/`voice`/etc. regardless of which form the LLM produced.
function seedSelected(parsed) {
  if (!parsed || typeof parsed !== 'object') return parsed;
  const firstOf = (arr) => (Array.isArray(arr) && arr[0]) ? arr[0] : null;
  const out = { ...parsed };
  for (const field of ['genre', 'voice', 'raga', 'taal', 'vaab', 'key']) {
    const alt = firstOf(parsed[`${field}s`]); // alternates array is field + 's'
    out[field] = parsed[field] ?? alt?.value ?? '';
    out[`${field}Note`] = parsed[`${field}Note`] ?? alt?.note ?? '';
  }
  return out;
}

// Read a File/Blob as base64. Converts in 32 KiB chunks so each
// String.fromCharCode.apply call stays below engine argument-count limits,
// and joins the chunks once at the end instead of growing one string with
// `+=` (linear instead of worst-case quadratic on large files).
async function fileToBase64(file) {
  const bytes = new Uint8Array(await file.arrayBuffer());
  const chunk = 0x8000; // 32 KiB per fromCharCode call
  const pieces = [];
  for (let i = 0; i < bytes.length; i += chunk) {
    pieces.push(String.fromCharCode.apply(null, bytes.subarray(i, i + chunk)));
  }
  return btoa(pieces.join(''));
}

// Map an audio MIME type to the correct file extension for download.
// Lyria returns audio/mpeg (the official MIME for MP3 audio per RFC 3003) —
// the bytes are MP3, only the extension naming is ambiguous, so we want .mp3.
function audioExtFromMime(mime) {
  // Normalise: lowercase, drop any ";codecs=…" parameters, trim.
  const normalized = String(mime || '').toLowerCase().split(';')[0].trim();
  const EXT_BY_MIME = new Map([
    ['audio/mpeg', 'mp3'],
    ['audio/mp3', 'mp3'],
    ['audio/mpeg3', 'mp3'],
    ['audio/x-mpeg-3', 'mp3'],
    ['audio/wav', 'wav'],
    ['audio/x-wav', 'wav'],
    ['audio/wave', 'wav'],
    ['audio/ogg', 'ogg'],
    ['audio/flac', 'flac'],
    ['audio/aac', 'aac'],
    ['audio/aiff', 'aiff'],
    ['audio/x-aiff', 'aiff'],
    ['audio/mp4', 'm4a'],
    ['audio/x-m4a', 'm4a'],
    ['audio/m4a', 'm4a'],
  ]);
  // Unknown or missing types default to .mp3 (matches Lyria's usual output).
  return EXT_BY_MIME.get(normalized) ?? 'mp3';
}

// Map browser File.type to a Gemini-supported audio MIME.
// Gemini accepts: audio/wav, audio/mp3, audio/aiff, audio/aac, audio/ogg, audio/flac.
function normalizeAudioMime(file) {
  const reported = (file.type || '').toLowerCase();
  const REWRITES = new Map([
    ['audio/mpeg', 'audio/mp3'],
    ['audio/mp3', 'audio/mp3'],
    ['audio/x-wav', 'audio/wav'],
    ['audio/wave', 'audio/wav'],
    // MP4/M4A containers: report the codec MIME from Gemini's accepted list.
    ['audio/mp4', 'audio/aac'],
    ['audio/x-m4a', 'audio/aac'],
    ['audio/m4a', 'audio/aac'],
  ]);
  if (REWRITES.has(reported)) return REWRITES.get(reported);
  // Any other audio/* passes through unchanged (best effort); everything
  // else — including an empty/missing type — falls back to audio/mp3.
  return reported.startsWith('audio/') ? reported : 'audio/mp3';
}

// Chrome-style top bar: traffic-light dots, brand mark, mode/title
// breadcrumbs, and status pills (model readiness + tempo/taal).
function TopBar({ title, bpm, taal, mode, keyReady, hasKey }) {
  // "ready" = the key probe has finished AND some usable key path exists.
  const ready = keyReady && hasKey;
  return (
    <div className="topbar">
      <div className="traffic"><span /><span /><span /></div>
      <div className="brand">
        <div className="mark">
          {/* Inline music-note glyph (decorative) */}
          <svg viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg" aria-hidden="true">
            <path fillRule="evenodd" clipRule="evenodd" d="M18.5149 1.12128C19.7772 0.805704 21 1.76042 21 3.06156V5.93846C21 6.85619 20.3754 7.65616 19.4851 7.87874L13 9.50001V18C13 20.7614 10.7614 23 8 23C5.23858 23 3 20.7614 3 18C3 15.2386 5.23858 13 8 13C9.12561 13 10.1643 13.372 11 13.9996V4.56156C11 3.64383 11.6246 2.84386 12.5149 2.62128L18.5149 1.12128ZM17.7575 3.31064C18.3886 3.15286 19 3.63022 19 4.28079V5.21923C19 5.6781 18.6877 6.07808 18.2425 6.18938L14.2425 7.18938C13.6114 7.34716 13 6.8698 13 6.21923V5.28079C13 4.82192 13.3123 4.42194 13.7575 4.31064L17.7575 3.31064Z" fill="currentColor"/>
            <path fillRule="evenodd" clipRule="evenodd" d="M5.00786 18C5.00786 19.6525 6.34749 20.9921 8 20.9921C9.65251 20.9921 10.9921 19.6525 10.9921 18C10.9921 16.3475 9.65251 15.0079 8 15.0079C6.34749 15.0079 5.00786 16.3475 5.00786 18Z" fill="currentColor"/>
          </svg>
        </div>
        <div className="name">MyMusic <em>Studio</em></div>
      </div>
      <div className="crumbs">
        <span>{mode === 'auto' ? 'Auto mode' : mode === 'reference' ? 'Reference mode' : 'Manual mode'}</span>
        <span className="sep">/</span>
        <span className="now">{title || 'Untitled'}</span>
      </div>
      <div className="right">
        {/* Readiness pill dims and greys its dot until a key path is usable */}
        <span className="pill" style={{ opacity: ready ? 1 : .55 }}>
          <span className="dot" style={{ background: ready ? undefined : '#9a8d7a' }} />
          {ready ? 'Lyria 3 Pro · ready' : 'Lyria 3 Pro · key needed'}
        </span>
        <span className="pill">{bpm} bpm · {taal}</span>
      </div>
    </div>
  );
}

// Mount the app into the host page's #root element.
const root = ReactDOM.createRoot(document.getElementById('root'));
root.render(<App />);
